From 809344a6f5a1b2829add77983d7cc73ccfae5c35 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Thu, 21 Apr 2022 17:34:40 -0500 Subject: [PATCH] peering: initial sync (#12842) - Add endpoints related to peering: read, list, generate token, initiate peering - Update node/service/check table indexing to account for peers - Foundational changes for pushing service updates to a peer - Plumb peer name through Health.ServiceNodes path see: ENT-1765, ENT-1280, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679, ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663, ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634, ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555 Co-authored-by: R.B. Boyer Co-authored-by: freddygv Co-authored-by: Chris S. Kim Co-authored-by: Evan Culver Co-authored-by: Nitya Dhanushkodi --- .changelog/_1679.txt | 3 + acl/acl_oss.go | 4 +- acl/enterprisemeta_oss.go | 4 + agent/agent.go | 5 + agent/auto-config/config.go | 2 +- agent/auto-config/mock_test.go | 6 +- agent/auto-config/tls.go | 4 +- .../cache-types/catalog_list_services_test.go | 7 +- agent/cache-types/mock_Agent.go | 92 + agent/cache/cache.go | 14 +- agent/cache/cache_test.go | 6 +- agent/cache/request.go | 12 +- agent/consul/acl.go | 30 +- agent/consul/auto_config_endpoint_test.go | 3 +- agent/consul/autopilot.go | 2 +- .../autopilotevents/mock_StateStore_test.go | 18 +- .../autopilotevents/ready_servers_events.go | 11 +- .../ready_servers_events_test.go | 20 +- agent/consul/catalog_endpoint.go | 28 +- agent/consul/client_test.go | 2 +- agent/consul/fsm/commands_oss.go | 86 +- agent/consul/fsm/commands_oss_test.go | 34 +- agent/consul/fsm/snapshot_oss.go | 73 +- agent/consul/fsm/snapshot_oss_test.go | 42 +- agent/consul/health_endpoint.go | 18 +- agent/consul/health_endpoint_test.go | 206 +- agent/consul/internal_endpoint.go | 8 +- agent/consul/issue_test.go | 4 +- agent/consul/leader.go | 22 +- agent/consul/leader_federation_state_ae.go | 2 +- agent/consul/leader_peering.go | 244 ++ agent/consul/leader_peering_test.go | 197 ++ agent/consul/leader_test.go | 52 +- agent/consul/peering_backend.go | 126 + agent/consul/peering_backend_oss.go | 15 + agent/consul/peering_backend_oss_test.go | 51 + agent/consul/peering_backend_test.go | 115 + agent/consul/prepared_query/walk_test.go | 5 +- agent/consul/prepared_query_endpoint.go | 4 +- agent/consul/server.go | 26 +- agent/consul/server_test.go | 2 + agent/consul/state/acl_schema.go | 20 + agent/consul/state/catalog.go | 876 ++++-- agent/consul/state/catalog_events.go | 73 +- agent/consul/state/catalog_events_oss.go | 17 + agent/consul/state/catalog_events_oss_test.go | 45 + agent/consul/state/catalog_events_test.go | 65 +- agent/consul/state/catalog_oss.go | 162 +- agent/consul/state/catalog_oss_test.go | 363 ++- agent/consul/state/catalog_schema.go | 153 +- agent/consul/state/catalog_test.go | 1365 +++++---- agent/consul/state/connect_ca_events.go | 5 + agent/consul/state/coordinate_test.go | 2 +- agent/consul/state/intention.go | 7 +- agent/consul/state/peering.go | 486 ++++ agent/consul/state/peering_oss.go | 66 + agent/consul/state/peering_test.go | 811 ++++++ agent/consul/state/query.go | 16 +- agent/consul/state/query_oss.go | 24 + agent/consul/state/schema.go | 6 +- agent/consul/state/schema_oss.go | 15 +- agent/consul/state/session_test.go | 10 +- agent/consul/state/state_store.go | 9 +- agent/consul/state/store_integration_test.go | 7
+- agent/consul/state/txn.go | 28 +- agent/consul/state/txn_test.go | 6 +- agent/consul/state/usage_test.go | 4 +- agent/consul/stream/event.go | 49 + agent/consul/stream/event_publisher_test.go | 5 + agent/consul/txn_endpoint_test.go | 6 +- .../private/services/subscribe/subscribe.go | 65 +- .../services/subscribe/subscribe_test.go | 2 +- .../dataplane/get_envoy_bootstrap_params.go | 9 +- .../grpc/public/services/dataplane/server.go | 2 +- agent/health_endpoint.go | 2 + agent/health_endpoint_test.go | 238 +- agent/http.go | 6 + agent/http_register.go | 4 + agent/peering_endpoint.go | 118 + agent/peering_endpoint_oss_test.go | 45 + agent/peering_endpoint_test.go | 312 ++ agent/rpc/peering/service.go | 741 +++++ agent/rpc/peering/service_oss_test.go | 39 + agent/rpc/peering/service_test.go | 414 +++ agent/rpc/peering/stream_test.go | 810 ++++++ agent/rpc/peering/stream_tracker.go | 212 ++ agent/rpc/peering/stream_tracker_test.go | 162 ++ agent/rpc/peering/subscription_manager.go | 149 + .../rpc/peering/subscription_manager_test.go | 362 +++ agent/rpc/peering/subscription_view.go | 141 + agent/rpc/peering/subscription_view_test.go | 338 +++ agent/rpc/peering/testing.go | 199 ++ agent/rpc/peering/testutil_oss_test.go | 16 + agent/rpc/peering/validate.go | 62 + agent/rpc/peering/validate_test.go | 107 + agent/rpcclient/health/health.go | 9 +- agent/rpcclient/health/view_test.go | 8 +- agent/structs/config_entry_export_oss_test.go | 62 + agent/structs/config_entry_exports.go | 36 +- agent/structs/config_entry_exports_test.go | 94 + agent/structs/config_entry_oss.go | 7 +- agent/structs/peering.go | 9 + agent/structs/prepared_query.go | 4 + agent/structs/structs.go | 81 +- agent/structs/structs_filtering_test.go | 29 +- agent/structs/structs_test.go | 2 + agent/submatview/local_materializer.go | 109 + agent/submatview/materializer.go | 289 +- agent/submatview/rpc_materializer.go | 125 + agent/submatview/store.go | 16 +- agent/submatview/store_integration_test.go | 1 - agent/submatview/store_test.go | 26 +- agent/testagent.go | 2 - api/agent.go | 1 + api/agent_test.go | 2 +- api/config_entry_exports.go | 5 + api/config_entry_exports_test.go | 102 + go.mod | 1 + go.sum | 11 +- logging/names.go | 1 + proto/pbpeering/generate.go | 9 + proto/pbpeering/peering.go | 202 ++ proto/pbpeering/peering.pb.binary.go | 248 ++ proto/pbpeering/peering.pb.go | 2569 +++++++++++++++++ proto/pbpeering/peering.proto | 283 ++ proto/pbpeering/peering_oss.go | 16 + proto/pbpeering/types.go | 5 + proto/pbservice/healthcheck.gen.go | 2 + proto/pbservice/healthcheck.pb.go | 288 +- proto/pbservice/healthcheck.proto | 1 + proto/pbservice/node.gen.go | 4 + proto/pbservice/node.pb.binary.go | 10 + proto/pbservice/node.pb.go | 370 ++- proto/pbservice/node.proto | 9 + proto/pbstatus/status.pb.binary.go | 18 + proto/pbstatus/status.pb.go | 204 ++ proto/pbstatus/status.proto | 47 + proto/pbsubscribe/subscribe.pb.go | 103 +- proto/pbsubscribe/subscribe.proto | 3 + testrpc/wait.go | 3 + 140 files changed, 14159 insertions(+), 2128 deletions(-) create mode 100644 .changelog/_1679.txt create mode 100644 agent/cache-types/mock_Agent.go create mode 100644 agent/consul/leader_peering.go create mode 100644 agent/consul/leader_peering_test.go create mode 100644 agent/consul/peering_backend.go create mode 100644 agent/consul/peering_backend_oss.go create mode 100644 agent/consul/peering_backend_oss_test.go create mode 100644 agent/consul/peering_backend_test.go create mode 100644 agent/consul/state/catalog_events_oss_test.go create mode 
100644 agent/consul/state/peering.go create mode 100644 agent/consul/state/peering_oss.go create mode 100644 agent/consul/state/peering_test.go create mode 100644 agent/peering_endpoint.go create mode 100644 agent/peering_endpoint_oss_test.go create mode 100644 agent/peering_endpoint_test.go create mode 100644 agent/rpc/peering/service.go create mode 100644 agent/rpc/peering/service_oss_test.go create mode 100644 agent/rpc/peering/service_test.go create mode 100644 agent/rpc/peering/stream_test.go create mode 100644 agent/rpc/peering/stream_tracker.go create mode 100644 agent/rpc/peering/stream_tracker_test.go create mode 100644 agent/rpc/peering/subscription_manager.go create mode 100644 agent/rpc/peering/subscription_manager_test.go create mode 100644 agent/rpc/peering/subscription_view.go create mode 100644 agent/rpc/peering/subscription_view_test.go create mode 100644 agent/rpc/peering/testing.go create mode 100644 agent/rpc/peering/testutil_oss_test.go create mode 100644 agent/rpc/peering/validate.go create mode 100644 agent/rpc/peering/validate_test.go create mode 100644 agent/structs/config_entry_export_oss_test.go create mode 100644 agent/structs/config_entry_exports_test.go create mode 100644 agent/structs/peering.go create mode 100644 agent/submatview/local_materializer.go create mode 100644 agent/submatview/rpc_materializer.go create mode 100644 api/config_entry_exports_test.go create mode 100644 proto/pbpeering/generate.go create mode 100644 proto/pbpeering/peering.go create mode 100644 proto/pbpeering/peering.pb.binary.go create mode 100644 proto/pbpeering/peering.pb.go create mode 100644 proto/pbpeering/peering.proto create mode 100644 proto/pbpeering/peering_oss.go create mode 100644 proto/pbpeering/types.go create mode 100644 proto/pbstatus/status.pb.binary.go create mode 100644 proto/pbstatus/status.pb.go create mode 100644 proto/pbstatus/status.proto diff --git a/.changelog/_1679.txt b/.changelog/_1679.txt new file mode 100644 index 000000000..d7f524127 --- /dev/null +++ b/.changelog/_1679.txt @@ -0,0 +1,3 @@ +```release-note:breaking-change +config-entry: Exporting a specific service name across all namespaces is invalid. +``` \ No newline at end of file diff --git a/acl/acl_oss.go b/acl/acl_oss.go index ca2974e4e..693280883 100644 --- a/acl/acl_oss.go +++ b/acl/acl_oss.go @@ -3,7 +3,9 @@ package acl -const DefaultPartitionName = "" +const ( + DefaultPartitionName = "" +) // Reviewer Note: This is a little bit strange; one might want it to be "" like partition name // However in consul/structs/intention.go we define IntentionDefaultNamespace as 'default' and so diff --git a/acl/enterprisemeta_oss.go b/acl/enterprisemeta_oss.go index 2296fdd43..f0f15bc05 100644 --- a/acl/enterprisemeta_oss.go +++ b/acl/enterprisemeta_oss.go @@ -106,3 +106,7 @@ func NewEnterpriseMetaWithPartition(_, _ string) EnterpriseMeta { // FillAuthzContext stub func (_ *EnterpriseMeta) FillAuthzContext(_ *AuthorizerContext) {} + +func NormalizeNamespace(_ string) string { + return "" +} diff --git a/agent/agent.go b/agent/agent.go index 72f861dc4..a3f3cfab8 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -20,6 +20,7 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" @@ -357,6 +358,8 @@ type Agent struct { // into Agent, which will allow us to remove this field.
rpcClientHealth *health.Client + rpcClientPeering pbpeering.PeeringServiceClient + // routineManager is responsible for managing longer running go routines // run by the Agent routineManager *routine.Manager @@ -434,6 +437,8 @@ func New(bd BaseDeps) (*Agent, error) { QueryOptionDefaults: config.ApplyDefaultQueryOptions(a.config), } + a.rpcClientPeering = pbpeering.NewPeeringServiceClient(conn) + a.serviceManager = NewServiceManager(&a) // We used to do this in the Start method. However it doesn't need to go diff --git a/agent/auto-config/config.go b/agent/auto-config/config.go index a20121fb9..94f45d1fc 100644 --- a/agent/auto-config/config.go +++ b/agent/auto-config/config.go @@ -27,7 +27,7 @@ type DirectRPC interface { // agent/cache.Cache struct that we care about type Cache interface { Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error - Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error + Prepopulate(t string, result cache.FetchResult, dc string, peerName string, token string, key string) error } // ServerProvider is an interface that can be used to find one server in the local DC known to diff --git a/agent/auto-config/mock_test.go b/agent/auto-config/mock_test.go index 45fd42ef4..1ff53bc62 100644 --- a/agent/auto-config/mock_test.go +++ b/agent/auto-config/mock_test.go @@ -137,7 +137,7 @@ func (m *mockCache) Notify(ctx context.Context, t string, r cache.Request, corre return err } -func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error { +func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, peerName string, token string, key string) error { var restore string cert, ok := result.Value.(*structs.IssuedCert) if ok { @@ -147,7 +147,7 @@ func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, t cert.PrivateKeyPEM = "redacted" } - ret := m.Called(t, result, dc, token, key) + ret := m.Called(t, result, dc, peerName, token, key) if ok && restore != "" { cert.PrivateKeyPEM = restore @@ -304,6 +304,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok rootRes, datacenter, "", + "", rootsReq.CacheInfo().Key, ).Return(nil).Once() @@ -330,6 +331,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok cachetype.ConnectCALeafName, leafRes, datacenter, + "", token, leafReq.Key(), ).Return(nil).Once() diff --git a/agent/auto-config/tls.go b/agent/auto-config/tls.go index 0683e94ba..e8a59d19f 100644 --- a/agent/auto-config/tls.go +++ b/agent/auto-config/tls.go @@ -96,7 +96,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.QueryMeta.Index} rootsReq := ac.caRootsRequest() // getting the roots doesn't require a token so in order to potentially share the cache with another - if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, "", rootsReq.CacheInfo().Key); err != nil { + if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, structs.DefaultPeerKeyword, "", rootsReq.CacheInfo().Key); err != nil { return err } @@ -108,7 +108,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er Index: certs.IssuedCert.RaftIndex.ModifyIndex, State: 
cachetype.ConnectCALeafSuccess(connect.EncodeSigningKeyID(cert.AuthorityKeyId)), } - if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()); err != nil { + if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, structs.DefaultPeerKeyword, leafReq.Token, leafReq.Key()); err != nil { return err } diff --git a/agent/cache-types/catalog_list_services_test.go b/agent/cache-types/catalog_list_services_test.go index a630daaf3..60aa4ed81 100644 --- a/agent/cache-types/catalog_list_services_test.go +++ b/agent/cache-types/catalog_list_services_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" - "github.com/hashicorp/consul/agent/cache" - "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" ) func TestCatalogListServices(t *testing.T) { @@ -104,7 +105,7 @@ func TestCatalogListServices_IntegrationWithCache_NotModifiedResponse(t *testing }, } - err := c.Prepopulate(CatalogListServicesName, last, "dc1", "token", req.CacheInfo().Key) + err := c.Prepopulate(CatalogListServicesName, last, "dc1", "", "token", req.CacheInfo().Key) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/agent/cache-types/mock_Agent.go b/agent/cache-types/mock_Agent.go new file mode 100644 index 000000000..ec3ba4031 --- /dev/null +++ b/agent/cache-types/mock_Agent.go @@ -0,0 +1,92 @@ +// Code generated by mockery v2.11.0. DO NOT EDIT. + +package cachetype + +import ( + local "github.com/hashicorp/consul/agent/local" + memdb "github.com/hashicorp/go-memdb" + + mock "github.com/stretchr/testify/mock" + + structs "github.com/hashicorp/consul/agent/structs" + + testing "testing" + + time "time" +) + +// MockAgent is an autogenerated mock type for the Agent type +type MockAgent struct { + mock.Mock +} + +// LocalBlockingQuery provides a mock function with given fields: alwaysBlock, hash, wait, fn +func (_m *MockAgent) LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration, fn func(memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) { + ret := _m.Called(alwaysBlock, hash, wait, fn) + + var r0 string + if rf, ok := ret.Get(0).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) string); ok { + r0 = rf(alwaysBlock, hash, wait, fn) + } else { + r0 = ret.Get(0).(string) + } + + var r1 interface{} + if rf, ok := ret.Get(1).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) interface{}); ok { + r1 = rf(alwaysBlock, hash, wait, fn) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(interface{}) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) error); ok { + r2 = rf(alwaysBlock, hash, wait, fn) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// LocalState provides a mock function with given fields: +func (_m *MockAgent) LocalState() *local.State { + ret := _m.Called() + + var r0 *local.State + if rf, ok := ret.Get(0).(func() *local.State); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*local.State) + } + } + + return r0 +} + +// ServiceHTTPBasedChecks provides a mock function with given fields: id +func (_m *MockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType { + ret := 
_m.Called(id) + + var r0 []structs.CheckType + if rf, ok := ret.Get(0).(func(structs.ServiceID) []structs.CheckType); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]structs.CheckType) + } + } + + return r0 +} + +// NewMockAgent creates a new instance of MockAgent. It also registers a cleanup function to assert the mocks expectations. +func NewMockAgent(t testing.TB) *MockAgent { + mock := &MockAgent{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/cache/cache.go b/agent/cache/cache.go index f06ce27b2..de6d001e3 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -91,7 +91,7 @@ const ( // struct in agent/structs. This API makes cache usage a mostly drop-in // replacement for non-cached RPC calls. // -// The cache is partitioned by ACL and datacenter. This allows the cache +// The cache is partitioned by ACL and datacenter/peer. This allows the cache // to be safe for multi-DC queries and for queries where the data is modified // due to ACLs all without the cache having to have any clever logic, at // the slight expense of a less perfect cache. @@ -406,7 +406,7 @@ func (c *Cache) getWithIndex(ctx context.Context, r getOptions) (interface{}, Re return result.Value, ResultMeta{}, err } - key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.Token, r.Info.Key) + key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.PeerName, r.Info.Token, r.Info.Key) // First time through first := true @@ -526,7 +526,11 @@ RETRY_GET: } } -func makeEntryKey(t, dc, token, key string) string { +func makeEntryKey(t, dc, peerName, token, key string) string { + // TODO(peering): figure out if this is the desired format + if peerName != "" { + return fmt.Sprintf("%s/%s/%s/%s", t, "peer:"+peerName, token, key) + } return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key) } @@ -884,8 +888,8 @@ func (c *Cache) Close() error { // on startup. It is used to set the ConnectRootCA and AgentLeafCert when // AutoEncrypt.TLS is turned on. The cache itself cannot fetch that the first // time because it requires a special RPCType. Subsequent runs are fine though. 
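Stepping back from the hunk above: makeEntryKey is the crux of how the cache becomes peer-aware. When a peer name is present it displaces the datacenter segment of the key (with a "peer:" prefix), so results fetched from a cluster peer can never collide with local results for the same type, token, and key. A standalone sketch of that scheme, mirroring the format in the patch (the TODO above notes the format itself is still provisional; the type and key values here are illustrative):

```go
package main

import "fmt"

// makeEntryKey mirrors the peer-aware cache key scheme introduced in this
// patch: the peer name, when set, takes the place of the datacenter segment.
func makeEntryKey(t, dc, peerName, token, key string) string {
	if peerName != "" {
		return fmt.Sprintf("%s/%s/%s/%s", t, "peer:"+peerName, token, key)
	}
	return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key)
}

func main() {
	// Local query: partitioned by datacenter.
	fmt.Println(makeEntryKey("health-services", "dc1", "", "token1", "web"))
	// -> health-services/dc1/token1/web

	// Peered query: the peer name replaces the datacenter segment.
	fmt.Println(makeEntryKey("health-services", "dc1", "my-peer", "token1", "web"))
	// -> health-services/peer:my-peer/token1/web
}
```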
-func (c *Cache) Prepopulate(t string, res FetchResult, dc, token, k string) error { - key := makeEntryKey(t, dc, token, k) +func (c *Cache) Prepopulate(t string, res FetchResult, dc, peerName, token, k string) error { + key := makeEntryKey(t, dc, peerName, token, k) newEntry := cacheEntry{ Valid: true, Value: res.Value, diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 5c2b3d203..a93969c2c 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -1545,7 +1545,7 @@ func TestCacheReload(t *testing.T) { c.entriesLock.Lock() tEntry, ok := c.types["t1"] require.True(t, ok) - keyName := makeEntryKey("t1", "", "", "hello1") + keyName := makeEntryKey("t1", "", "", "", "hello1") ok, entryValid, entry := c.getEntryLocked(tEntry, keyName, RequestInfo{}) require.True(t, ok) require.True(t, entryValid) @@ -1687,7 +1687,7 @@ func TestCache_Prepopulate(t *testing.T) { c := New(Options{}) c.RegisterType("t", typ) - c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "token", "v1") + c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "", "token", "v1") ctx := context.Background() req := fakeRequest{ @@ -1740,7 +1740,7 @@ func TestCache_RefreshLifeCycle(t *testing.T) { c := New(Options{}) c.RegisterType("t", typ) - key := makeEntryKey("t", "dc1", "token", "v1") + key := makeEntryKey("t", "dc1", "", "token", "v1") ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/agent/cache/request.go b/agent/cache/request.go index b2d3ab854..fbce6f891 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -16,6 +16,9 @@ type Request interface { // RequestInfo represents cache information for a request. The caching // framework uses this to control the behavior of caching and to determine // cacheability. +// +// TODO(peering): finish ensuring everything that sets a Datacenter sets or doesn't set PeerName. +// TODO(peering): also make sure the peer name is present in the cache key likely in lieu of the datacenter somehow. type RequestInfo struct { // Key is a unique cache key for this request. This key should // be globally unique to identify this request, since any conflicting @@ -28,14 +31,17 @@ type RequestInfo struct { // // Datacenter is the datacenter that the request is targeting. // - // Both of these values are used to partition the cache. The cache framework + // PeerName is the peer that the request is targeting. + // + // All of these values are used to partition the cache. The cache framework // today partitions data on these values to simplify behavior: by // partitioning ACL tokens, the cache doesn't need to be smart about - // filtering results. By filtering datacenter results, the cache can - // service the multi-DC nature of Consul. This comes at the expense of + // filtering results. By filtering datacenter/peer results, the cache can + // service the multi-DC/multi-peer nature of Consul. This comes at the expense of // working set size, but in general the effect is minimal. Token string Datacenter string + PeerName string // MinIndex is the minimum index being queried. 
This is used to // determine if we already have data satisfying the query or if we need diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 2c4264223..2badf7875 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1174,7 +1174,21 @@ func (r *ACLResolver) ACLsEnabled() bool { return true } -func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (ACLResolveResult, error) { +// TODO(peering): fix all calls to use the new signature and rename it back +func (r *ACLResolver) ResolveTokenAndDefaultMeta( + token string, + entMeta *acl.EnterpriseMeta, + authzContext *acl.AuthorizerContext, +) (ACLResolveResult, error) { + return r.ResolveTokenAndDefaultMetaWithPeerName(token, entMeta, structs.DefaultPeerKeyword, authzContext) +} + +func (r *ACLResolver) ResolveTokenAndDefaultMetaWithPeerName( + token string, + entMeta *acl.EnterpriseMeta, + peerName string, + authzContext *acl.AuthorizerContext, +) (ACLResolveResult, error) { result, err := r.ResolveToken(token) if err != nil { return ACLResolveResult{}, err @@ -1186,9 +1200,19 @@ func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.Ente // Default the EnterpriseMeta based on the Tokens meta or actual defaults // in the case of unknown identity - if result.ACLIdentity != nil { + switch { + case peerName == "" && result.ACLIdentity != nil: entMeta.Merge(result.ACLIdentity.EnterpriseMetadata()) - } else { + case result.ACLIdentity != nil: + // We _do not_ normalize the enterprise meta from the token when a peer + // name was specified because namespaces across clusters are not + // equivalent. A local namespace is _never_ correct for a remote query. + entMeta.Merge( + structs.DefaultEnterpriseMetaInPartition( + result.ACLIdentity.EnterpriseMetadata().PartitionOrDefault(), + ), + ) + default: entMeta.Merge(structs.DefaultEnterpriseMetaInDefaultPartition()) } diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index 676b126fd..43df5fdab 100644 --- a/agent/consul/auto_config_endpoint_test.go +++ b/agent/consul/auto_config_endpoint_test.go @@ -11,12 +11,11 @@ import ( "testing" "time" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/memberlist" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" diff --git a/agent/consul/autopilot.go b/agent/consul/autopilot.go index 93560979a..4d934053a 100644 --- a/agent/consul/autopilot.go +++ b/agent/consul/autopilot.go @@ -142,7 +142,7 @@ func (s *Server) autopilotServerFromMetadata(srv *metadata.Server) (*autopilot.S // populate the node meta if there is any. 
When a node first joins or if // there are ACL issues then this could be empty if the server has not // yet been able to register itself in the catalog - _, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition()) + _, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword) if err != nil { return nil, fmt.Errorf("error retrieving node from state store: %w", err) } diff --git a/agent/consul/autopilotevents/mock_StateStore_test.go b/agent/consul/autopilotevents/mock_StateStore_test.go index 7c8ee9003..200e68be7 100644 --- a/agent/consul/autopilotevents/mock_StateStore_test.go +++ b/agent/consul/autopilotevents/mock_StateStore_test.go @@ -18,20 +18,20 @@ type MockStateStore struct { mock.Mock } -// GetNodeID provides a mock function with given fields: _a0, _a1 -func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta) (uint64, *structs.Node, error) { - ret := _m.Called(_a0, _a1) +// GetNodeID provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _a2 string) (uint64, *structs.Node, error) { + ret := _m.Called(_a0, _a1, _a2) var r0 uint64 - if rf, ok := ret.Get(0).(func(types.NodeID, *acl.EnterpriseMeta) uint64); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(types.NodeID, *acl.EnterpriseMeta, string) uint64); ok { + r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Get(0).(uint64) } var r1 *structs.Node - if rf, ok := ret.Get(1).(func(types.NodeID, *acl.EnterpriseMeta) *structs.Node); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(types.NodeID, *acl.EnterpriseMeta, string) *structs.Node); ok { + r1 = rf(_a0, _a1, _a2) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*structs.Node) @@ -39,8 +39,8 @@ func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta) ( } var r2 error - if rf, ok := ret.Get(2).(func(types.NodeID, *acl.EnterpriseMeta) error); ok { - r2 = rf(_a0, _a1) + if rf, ok := ret.Get(2).(func(types.NodeID, *acl.EnterpriseMeta, string) error); ok { + r2 = rf(_a0, _a1, _a2) } else { r2 = ret.Error(2) } diff --git a/agent/consul/autopilotevents/ready_servers_events.go b/agent/consul/autopilotevents/ready_servers_events.go index dda14ebf9..7943ccacc 100644 --- a/agent/consul/autopilotevents/ready_servers_events.go +++ b/agent/consul/autopilotevents/ready_servers_events.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/types" ) @@ -70,6 +71,12 @@ func (e EventPayloadReadyServers) HasReadPermission(authz acl.Authorizer) bool { return authz.ServiceWriteAny(&authzContext) == acl.Allow } +func (e EventPayloadReadyServers) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + // TODO(peering) is this right? + // TODO(agentless) is this right? 
+ panic("EventPayloadReadyServers does not implement ToSubscriptionEvent") +} + func ExtractEventPayload(event stream.Event) (EventPayloadReadyServers, error) { if event.Topic != EventTopicReadyServers { return nil, fmt.Errorf("unexpected topic (%q) for a %q event", event.Topic, EventTopicReadyServers) @@ -114,7 +121,7 @@ func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher { //go:generate mockery --name StateStore --inpackage --testonly type StateStore interface { - GetNodeID(types.NodeID, *acl.EnterpriseMeta) (uint64, *structs.Node, error) + GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error) } //go:generate mockery --name Publisher --inpackage --testonly @@ -245,7 +252,7 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta // from the catalog at that often and publish the events. So while its not quite // as responsive as actually watching for the Catalog changes, its MUCH simpler to // code and reason about and having those addresses be updated within 30s is good enough. - _, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition()) + _, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword) if err != nil || node == nil { // no catalog information means we should return a nil addres map return nil diff --git a/agent/consul/autopilotevents/ready_servers_events_test.go b/agent/consul/autopilotevents/ready_servers_events_test.go index b6d930f69..223292404 100644 --- a/agent/consul/autopilotevents/ready_servers_events_test.go +++ b/agent/consul/autopilotevents/ready_servers_events_test.go @@ -4,14 +4,16 @@ import ( "testing" time "time" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/consul/stream" - structs "github.com/hashicorp/consul/agent/structs" - types "github.com/hashicorp/consul/types" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" mock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/stream" + structs "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" + types "github.com/hashicorp/consul/types" ) var testTime = time.Date(2022, 4, 14, 10, 56, 00, 0, time.UTC) @@ -161,6 +163,7 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) { store.On("GetNodeID", types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"), structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, ).Once().Return( uint64(0), &structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}}, @@ -170,6 +173,7 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) { store.On("GetNodeID", types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"), structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, ).Once().Return( uint64(0), &structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}}, @@ -179,6 +183,7 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) { store.On("GetNodeID", types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"), structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, ).Once().Return( uint64(0), &structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}}, @@ -487,6 +492,7 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) { 
store.On("GetNodeID", types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"), structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, ).Once().Return( uint64(0), &structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}}, @@ -496,6 +502,7 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) { store.On("GetNodeID", types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"), structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, ).Once().Return( uint64(0), &structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}}, @@ -505,6 +512,7 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) { store.On("GetNodeID", types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"), structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, ).Once().Return( uint64(0), &structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}}, @@ -547,6 +555,10 @@ func (e fakePayload) HasReadPermission(authz acl.Authorizer) bool { return false } +func (e fakePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("fakePayload does not implement ToSubscriptionEvent") +} + func TestExtractEventPayload(t *testing.T) { t.Run("wrong-topic", func(t *testing.T) { payload, err := ExtractEventPayload(stream.NewCloseSubscriptionEvent([]string{"foo"})) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 77ac97e77..742ddd1b3 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -133,7 +133,7 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error } // Check the complete register request against the given ACL policy. - _, ns, err := state.NodeServices(nil, args.Node, entMeta) + _, ns, err := state.NodeServices(nil, args.Node, entMeta, args.PeerName) if err != nil { return fmt.Errorf("Node lookup failed: %v", err) } @@ -367,7 +367,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e var ns *structs.NodeService if args.ServiceID != "" { - _, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta) + _, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta, args.PeerName) if err != nil { return fmt.Errorf("Service lookup failed: %v", err) } @@ -375,7 +375,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e var nc *structs.HealthCheck if args.CheckID != "" { - _, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta) + _, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta, args.PeerName) if err != nil { return fmt.Errorf("Check lookup failed: %v", err) } @@ -486,9 +486,9 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde func(ws memdb.WatchSet, state *state.Store) error { var err error if len(args.NodeMetaFilters) > 0 { - reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta) + reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta) + reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -546,9 +546,9 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I func(ws memdb.WatchSet, state *state.Store) error { var err error if len(args.NodeMetaFilters) > 0 { - reply.Index, reply.Services, err = 
state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta) + reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta) + reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -584,7 +584,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.ServiceList(ws, &args.EnterpriseMeta) + index, services, err := state.ServiceList(ws, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -611,13 +611,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru switch { case args.Connect: f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { - return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } default: f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { if args.ServiceAddress != "" { - return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta) + return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta, args.PeerName) } if args.TagFilter { @@ -630,10 +630,10 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru tags = []string{args.ServiceTag} } - return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta) + return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta, args.PeerName) } - return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } } @@ -768,7 +768,7 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta) + index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -824,7 +824,7 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta) + index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index 5c35e3f33..d8f0fbd4d 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -510,7 +510,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ Name: c.NodeName, - Level: testutil.TestLogLevel, + Level: hclog.Trace, Output: testutil.NewLogBuffer(t), }) diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index b07d12c14..861bffdfd 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto/pbpeering" 
) var CommandsSummaries = []prometheus.SummaryDefinition{ @@ -93,6 +94,10 @@ var CommandsSummaries = []prometheus.SummaryDefinition{ Name: []string{"fsm", "system_metadata"}, Help: "Measures the time it takes to apply a system metadata operation to the FSM.", }, + { + Name: []string{"fsm", "peering"}, + Help: "Measures the time it takes to apply a peering operation to the FSM.", + }, // TODO(kit): We generate the config-entry fsm summaries by reading off of the request. It is // possible to statically declare these when we know all of the names, but I didn't get to it // in this patch. Config-entries are known though and we should add these in the future. @@ -131,6 +136,11 @@ func init() { registerCommand(structs.ACLAuthMethodDeleteRequestType, (*FSM).applyACLAuthMethodDeleteOperation) registerCommand(structs.FederationStateRequestType, (*FSM).applyFederationStateOperation) registerCommand(structs.SystemMetadataRequestType, (*FSM).applySystemMetadataOperation) + registerCommand(structs.PeeringWriteType, (*FSM).applyPeeringWrite) + registerCommand(structs.PeeringDeleteType, (*FSM).applyPeeringDelete) + registerCommand(structs.PeeringTerminateByIDType, (*FSM).applyPeeringTerminate) + registerCommand(structs.PeeringTrustBundleWriteType, (*FSM).applyPeeringTrustBundleWrite) + registerCommand(structs.PeeringTrustBundleDeleteType, (*FSM).applyPeeringTrustBundleDelete) } func (c *FSM) applyRegister(buf []byte, index uint64) interface{} { @@ -159,17 +169,17 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} { // here is also baked into vetDeregisterWithACL() in acl.go, so if you // make changes here, be sure to also adjust the code over there. if req.ServiceID != "" { - if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta); err != nil { + if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta, req.PeerName); err != nil { c.logger.Warn("DeleteNodeService failed", "error", err) return err } } else if req.CheckID != "" { - if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta); err != nil { + if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta, req.PeerName); err != nil { c.logger.Warn("DeleteNodeCheck failed", "error", err) return err } } else { - if err := c.state.DeleteNode(index, req.Node, &req.EnterpriseMeta); err != nil { + if err := c.state.DeleteNode(index, req.Node, &req.EnterpriseMeta, req.PeerName); err != nil { c.logger.Warn("DeleteNode failed", "error", err) return err } @@ -679,3 +689,73 @@ func (c *FSM) applySystemMetadataOperation(buf []byte, index uint64) interface{} return fmt.Errorf("invalid system metadata operation type: %v", req.Op) } } + +func (c *FSM) applyPeeringWrite(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringWriteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering write request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(), + []metrics.Label{{Name: "op", Value: "write"}}) + + return c.state.PeeringWrite(index, req.Peering) +} + +// TODO(peering): replace with deferred deletion since this operation +// should involve cleanup of data associated with the peering. 
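Every peering applier registered above follows the same three-step shape: decode the raft log payload into a pbpeering request, record an fsm metric labelled with the operation, and delegate to the state store. A minimal sketch of that shape, with toy stand-ins invented for illustration in place of the real FSM, state store, and protobuf types:

```go
package main

import (
	"fmt"
	"time"
)

// peeringWriteRequest is a toy stand-in for pbpeering.PeeringWriteRequest.
type peeringWriteRequest struct{ Name string }

// stateStore is a toy stand-in for the consul state store.
type stateStore struct{}

func (s *stateStore) PeeringWrite(index uint64, name string) error {
	fmt.Printf("writing peering %q at raft index %d\n", name, index)
	return nil
}

type fsm struct{ state *stateStore }

// applyPeeringWrite demonstrates the decode -> measure -> apply pattern:
// the real applier decodes buf with structs.DecodeProto, defers a
// metrics.MeasureSinceWithLabels call, then calls into the state store.
func (c *fsm) applyPeeringWrite(req peeringWriteRequest, index uint64) interface{} {
	start := time.Now()
	defer func() { fmt.Printf("fsm.peering write took %s\n", time.Since(start)) }()
	return c.state.PeeringWrite(index, req.Name)
}

func main() {
	f := &fsm{state: &stateStore{}}
	_ = f.applyPeeringWrite(peeringWriteRequest{Name: "my-peer"}, 42)
}
```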
+func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringDeleteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering delete request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(), + []metrics.Label{{Name: "op", Value: "delete"}}) + + q := state.Query{ + Value: req.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + } + return c.state.PeeringDelete(index, q) +} + +func (c *FSM) applyPeeringTerminate(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringTerminateByIDRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering terminate request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(), + []metrics.Label{{Name: "op", Value: "terminate"}}) + + return c.state.PeeringTerminateByID(index, req.ID) +} + +func (c *FSM) applyPeeringTrustBundleWrite(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringTrustBundleWriteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering trust bundle write request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_trust_bundle"}, time.Now(), + []metrics.Label{{Name: "op", Value: "write"}}) + + return c.state.PeeringTrustBundleWrite(index, req.PeeringTrustBundle) +} + +func (c *FSM) applyPeeringTrustBundleDelete(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringTrustBundleDeleteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering trust bundle delete request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_trust_bundle"}, time.Now(), + []metrics.Label{{Name: "op", Value: "delete"}}) + + q := state.Query{ + Value: req.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + } + return c.state.PeeringTrustBundleDelete(index, q) +} diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index 27a21c871..061c4a9cc 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -69,7 +69,7 @@ func TestFSM_RegisterNode(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -81,7 +81,7 @@ func TestFSM_RegisterNode(t *testing.T) { } // Verify service registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -128,7 +128,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -137,7 +137,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify service registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -146,7 +146,7 @@ func
TestFSM_RegisterNode_Service(t *testing.T) { } // Verify check - _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -200,7 +200,7 @@ func TestFSM_DeregisterService(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -209,7 +209,7 @@ func TestFSM_DeregisterService(t *testing.T) { } // Verify service not registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -263,7 +263,7 @@ func TestFSM_DeregisterCheck(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -272,7 +272,7 @@ func TestFSM_DeregisterCheck(t *testing.T) { } // Verify check not registered - _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -332,7 +332,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify we are not registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -341,7 +341,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify service not registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -350,7 +350,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify checks not registered - _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -1468,7 +1468,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { // Verify we are not registered for i := 0; i < 10; i++ { - _, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil) + _, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) assert.Nil(t, node) } @@ -1491,7 +1491,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { // Verify we are still not registered for i := 0; i < 10; i++ { - _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil) + _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) assert.Nil(t, node) } @@ -1515,19 +1515,19 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { // Verify we are registered for i := 0; i < 10; i++ { - _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil) + _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) assert.NotNil(t, node) // Verify service registered - _, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), 
structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMetaInDefaultPartition(), "") require.NoError(t, err) require.NotNil(t, services) _, ok := services.Services["db"] assert.True(t, ok) // Verify check - _, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil) + _, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) require.NotNil(t, checks) assert.Equal(t, string(checks[0].CheckID), "db") diff --git a/agent/consul/fsm/snapshot_oss.go b/agent/consul/fsm/snapshot_oss.go index 48dea223e..3ee4c8558 100644 --- a/agent/consul/fsm/snapshot_oss.go +++ b/agent/consul/fsm/snapshot_oss.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" ) func init() { @@ -35,6 +36,8 @@ func init() { registerRestorer(structs.SystemMetadataRequestType, restoreSystemMetadata) registerRestorer(structs.ServiceVirtualIPRequestType, restoreServiceVirtualIP) registerRestorer(structs.FreeVirtualIPRequestType, restoreFreeVirtualIP) + registerRestorer(structs.PeeringWriteType, restorePeering) + registerRestorer(structs.PeeringTrustBundleWriteType, restorePeeringTrustBundle) } func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error { @@ -86,6 +89,12 @@ func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) err if err := s.persistIndex(sink, encoder); err != nil { return err } + if err := s.persistPeerings(sink, encoder); err != nil { + return err + } + if err := s.persistPeeringTrustBundles(sink, encoder); err != nil { + return err + } return nil } @@ -112,6 +121,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, NodeMeta: n.Meta, RaftIndex: n.RaftIndex, EnterpriseMeta: *nodeEntMeta, + PeerName: n.PeerName, } // Register the node itself @@ -123,7 +133,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, } // Register each service this node has - services, err := s.state.Services(n.Node, nodeEntMeta) + services, err := s.state.Services(n.Node, nodeEntMeta, n.PeerName) if err != nil { return err } @@ -139,7 +149,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, // Register each check this node has req.Service = nil - checks, err := s.state.Checks(n.Node, nodeEntMeta) + checks, err := s.state.Checks(n.Node, nodeEntMeta, n.PeerName) if err != nil { return err } @@ -161,7 +171,6 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, if err != nil { return err } - // TODO(partitions) for coord := coords.Next(); coord != nil; coord = coords.Next() { if _, err := sink.Write([]byte{byte(structs.CoordinateBatchUpdateType)}); err != nil { return err @@ -547,6 +556,42 @@ func (s *snapshot) persistVirtualIPs(sink raft.SnapshotSink, encoder *codec.Enco return nil } +func (s *snapshot) persistPeerings(sink raft.SnapshotSink, encoder *codec.Encoder) error { + peerings, err := s.state.Peerings() + if err != nil { + return err + } + + for entry := peerings.Next(); entry != nil; entry = peerings.Next() { + if _, err := sink.Write([]byte{byte(structs.PeeringWriteType)}); err != nil { + return err + } + if err := encoder.Encode(entry.(*pbpeering.Peering)); err != nil { + return err + } + } + + return nil +} + +func (s *snapshot) persistPeeringTrustBundles(sink raft.SnapshotSink, encoder *codec.Encoder) error { + ptbs, err := s.state.PeeringTrustBundles() + if err != nil { + return err + } 
+ + for entry := ptbs.Next(); entry != nil; entry = ptbs.Next() { + if _, err := sink.Write([]byte{byte(structs.PeeringTrustBundleWriteType)}); err != nil { + return err + } + if err := encoder.Encode(entry.(*pbpeering.PeeringTrustBundle)); err != nil { + return err + } + } + + return nil +} + func restoreRegistration(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { var req structs.RegisterRequest if err := decoder.Decode(&req); err != nil { @@ -849,3 +894,25 @@ func restoreFreeVirtualIP(header *SnapshotHeader, restore *state.Restore, decode } return nil } + +func restorePeering(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req pbpeering.Peering + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.Peering(&req); err != nil { + return err + } + return nil +} + +func restorePeeringTrustBundle(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req pbpeering.PeeringTrustBundle + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.PeeringTrustBundle(&req); err != nil { + return err + } + return nil +} diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index c75bbc197..558abf4be 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/sdk/testutil" ) @@ -473,6 +474,18 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, expect[i], sn.Service.Name) } + // Peerings + require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{ + Name: "baz", + })) + + // Peering Trust Bundles + require.NoError(t, fsm.state.PeeringTrustBundleWrite(32, &pbpeering.PeeringTrustBundle{ + TrustDomain: "qux.com", + PeerName: "qux", + RootPEMs: []string{"qux certificate bundle"}, + })) + // Snapshot snap, err := fsm.Snapshot() require.NoError(t, err) @@ -528,7 +541,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.NoError(t, fsm2.Restore(sink)) // Verify the contents - _, nodes, err := fsm2.state.Nodes(nil, nil) + _, nodes, err := fsm2.state.Nodes(nil, nil, "") require.NoError(t, err) require.Len(t, nodes, 2, "incorrect number of nodes: %v", nodes) @@ -556,7 +569,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, uint64(1), nodes[1].CreateIndex) require.Equal(t, uint64(23), nodes[1].ModifyIndex) - _, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil) + _, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil, "") require.NoError(t, err) require.Len(t, fooSrv.Services, 4) require.Contains(t, fooSrv.Services["db"].Tags, "primary") @@ -569,7 +582,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, uint64(3), fooSrv.Services["web"].CreateIndex) require.Equal(t, uint64(3), fooSrv.Services["web"].ModifyIndex) - _, checks, err := fsm2.state.NodeChecks(nil, "foo", nil) + _, checks, err := fsm2.state.NodeChecks(nil, "foo", nil, "") require.NoError(t, err) require.Len(t, checks, 1) require.Equal(t, "foo", checks[0].Node) @@ -768,6 +781,27 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, expect[i], sn.Service.Name) } + // Verify peering is restored + idx, prngRestored, err := fsm2.state.PeeringRead(nil, state.Query{ + Value: "baz", + }) + require.NoError(t, err) +
require.Equal(t, uint64(31), idx) + require.NotNil(t, prngRestored) + require.Equal(t, "baz", prngRestored.Name) + + // Verify peering trust bundle is restored + idx, ptbRestored, err := fsm2.state.PeeringTrustBundleRead(nil, state.Query{ + Value: "qux", + }) + require.NoError(t, err) + require.Equal(t, uint64(32), idx) + require.NotNil(t, ptbRestored) + require.Equal(t, "qux.com", ptbRestored.TrustDomain) + require.Equal(t, "qux", ptbRestored.PeerName) + require.Len(t, ptbRestored.RootPEMs, 1) + require.Equal(t, "qux certificate bundle", ptbRestored.RootPEMs[0]) + // Snapshot snap, err = fsm2.Snapshot() require.NoError(t, err) @@ -821,7 +855,7 @@ func TestFSM_BadRestore_OSS(t *testing.T) { require.Error(t, fsm.Restore(sink)) // Verify the contents didn't get corrupted. - _, nodes, err := fsm.state.Nodes(nil, nil) + _, nodes, err := fsm.state.Nodes(nil, nil, "") require.NoError(t, err) require.Len(t, nodes, 1) require.Equal(t, "foo", nodes[0].Node) diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index f9268c21c..60dc968c5 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -47,9 +47,9 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, var checks structs.HealthChecks var err error if len(args.NodeMetaFilters) > 0 { - index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta) + index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta) + index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -98,7 +98,7 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta) + index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -157,9 +157,9 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, var checks structs.HealthChecks var err error if len(args.NodeMetaFilters) > 0 { - index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta) + index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta) + index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -304,7 +304,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc // can be used by the ServiceNodes endpoint. 
func (h *Health) serviceNodesConnect(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { - return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } func (h *Health) serviceNodesIngress(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { @@ -317,11 +317,11 @@ func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args * // Agents < v1.3.0 populate the ServiceTag field. In this case, // use ServiceTag instead of the ServiceTags field. if args.ServiceTag != "" { - return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta) + return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta, args.PeerName) } - return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta) + return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta, args.PeerName) } func (h *Health) serviceNodesDefault(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { - return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index 4193f7fee..c3ebab97c 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" @@ -558,124 +557,109 @@ func TestHealth_ServiceNodes(t *testing.T) { } t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() + _, s1 := testServer(t) codec := rpcClient(t, s1) - defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + waitForLeaderEstablishment(t, s1) - arg := structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "db", - Service: "db", - Tags: []string{"primary"}, - }, - Check: &structs.HealthCheck{ - Name: "db connect", - Status: api.HealthPassing, - ServiceID: "db", - }, - } - var out struct{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } + testingPeerNames := []string{"", "my-peer"} - arg = structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - ID: "db", - Service: "db", - Tags: []string{"replica"}, - }, - Check: &structs.HealthCheck{ - Name: "db connect", - Status: api.HealthWarning, - ServiceID: "db", - }, - } - if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } - - var out2 structs.IndexedCheckServiceNodes - req := structs.ServiceSpecificRequest{ - Datacenter: "dc1", - ServiceName: "db", - ServiceTags: []string{"primary"}, - TagFilter: false, - } - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil { - t.Fatalf("err: %v", err) - } - - nodes := out2.Nodes 
- if len(nodes) != 2 { - t.Fatalf("Bad: %v", nodes) - } - if nodes[0].Node.Node != "bar" { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Node.Node != "foo" { - t.Fatalf("Bad: %v", nodes[1]) - } - if !stringslice.Contains(nodes[0].Service.Tags, "replica") { - t.Fatalf("Bad: %v", nodes[0]) - } - if !stringslice.Contains(nodes[1].Service.Tags, "primary") { - t.Fatalf("Bad: %v", nodes[1]) - } - if nodes[0].Checks[0].Status != api.HealthWarning { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Checks[0].Status != api.HealthPassing { - t.Fatalf("Bad: %v", nodes[1]) - } - - // Same should still work for <1.3 RPCs with singular tags - // DEPRECATED (singular-service-tag) - remove this when backwards RPC compat - // with 1.2.x is not required. - { - var out2 structs.IndexedCheckServiceNodes - req := structs.ServiceSpecificRequest{ - Datacenter: "dc1", - ServiceName: "db", - ServiceTag: "primary", - TagFilter: false, - } - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil { - t.Fatalf("err: %v", err) + // TODO(peering): will have to seed this data differently in the future + for _, peerName := range testingPeerNames { + arg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + PeerName: peerName, + Service: &structs.NodeService{ + ID: "db", + Service: "db", + Tags: []string{"primary"}, + PeerName: peerName, + }, + Check: &structs.HealthCheck{ + Name: "db connect", + Status: api.HealthPassing, + ServiceID: "db", + PeerName: peerName, + }, } + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + arg = structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + PeerName: peerName, + Service: &structs.NodeService{ + ID: "db", + Service: "db", + Tags: []string{"replica"}, + PeerName: peerName, + }, + Check: &structs.HealthCheck{ + Name: "db connect", + Status: api.HealthWarning, + ServiceID: "db", + PeerName: peerName, + }, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + } + + verify := func(t *testing.T, out2 structs.IndexedCheckServiceNodes, peerName string) { nodes := out2.Nodes - if len(nodes) != 2 { - t.Fatalf("Bad: %v", nodes) - } - if nodes[0].Node.Node != "bar" { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Node.Node != "foo" { - t.Fatalf("Bad: %v", nodes[1]) - } - if !stringslice.Contains(nodes[0].Service.Tags, "replica") { - t.Fatalf("Bad: %v", nodes[0]) - } - if !stringslice.Contains(nodes[1].Service.Tags, "primary") { - t.Fatalf("Bad: %v", nodes[1]) - } - if nodes[0].Checks[0].Status != api.HealthWarning { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Checks[0].Status != api.HealthPassing { - t.Fatalf("Bad: %v", nodes[1]) + require.Len(t, nodes, 2) + require.Equal(t, peerName, nodes[0].Node.PeerName) + require.Equal(t, peerName, nodes[1].Node.PeerName) + require.Equal(t, "bar", nodes[0].Node.Node) + require.Equal(t, "foo", nodes[1].Node.Node) + require.Equal(t, peerName, nodes[0].Service.PeerName) + require.Equal(t, peerName, nodes[1].Service.PeerName) + require.Contains(t, nodes[0].Service.Tags, "replica") + require.Contains(t, nodes[1].Service.Tags, "primary") + require.Equal(t, peerName, nodes[0].Checks[0].PeerName) + require.Equal(t, peerName, nodes[1].Checks[0].PeerName) + require.Equal(t, api.HealthWarning, nodes[0].Checks[0].Status) + require.Equal(t, api.HealthPassing, nodes[1].Checks[0].Status) + } + + for _, peerName := range testingPeerNames { + testName := "peer named " 
+ peerName + if peerName == "" { + testName = "local peer" } + t.Run(testName, func(t *testing.T) { + t.Run("with service tags", func(t *testing.T) { + var out2 structs.IndexedCheckServiceNodes + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceTags: []string{"primary"}, + TagFilter: false, + PeerName: peerName, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2)) + verify(t, out2, peerName) + }) + + // Same should still work for <1.3 RPCs with singular tags + // DEPRECATED (singular-service-tag) - remove this when backwards RPC compat + // with 1.2.x is not required. + t.Run("with legacy service tag", func(t *testing.T) { + var out2 structs.IndexedCheckServiceNodes + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceTag: "primary", + TagFilter: false, + PeerName: peerName, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2)) + verify(t, out2, peerName) + }) + }) } } diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index d78f20046..718002889 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -38,7 +38,7 @@ func (m *Internal) NodeInfo(args *structs.NodeSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta) + index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -69,7 +69,7 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta) + index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -112,7 +112,7 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. 
&reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { // Get, store, and filter nodes - maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta) + maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -314,7 +314,7 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl // Loop over the gateway <-> serviceName mappings and fetch all service instances for each var result structs.ServiceDump for _, gs := range gatewayServices { - idx, instances, err := state.CheckServiceNodes(ws, gs.Service.Name, &gs.Service.EnterpriseMeta) + idx, instances, err := state.CheckServiceNodes(ws, gs.Service.Name, &gs.Service.EnterpriseMeta, args.PeerName) if err != nil { return err } diff --git a/agent/consul/issue_test.go b/agent/consul/issue_test.go index 516e42ff9..7839be0b9 100644 --- a/agent/consul/issue_test.go +++ b/agent/consul/issue_test.go @@ -62,7 +62,7 @@ func TestHealthCheckRace(t *testing.T) { } // Verify the index - idx, out1, err := state.CheckServiceNodes(nil, "db", nil) + idx, out1, err := state.CheckServiceNodes(nil, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -85,7 +85,7 @@ func TestHealthCheckRace(t *testing.T) { } // Verify the index changed - idx, out2, err := state.CheckServiceNodes(nil, "db", nil) + idx, out2, err := state.CheckServiceNodes(nil, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 456fbec1e..aedcb032f 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -305,6 +305,8 @@ func (s *Server) establishLeadership(ctx context.Context) error { s.startFederationStateAntiEntropy(ctx) + s.startPeeringStreamSync(ctx) + if err := s.startConnectLeader(ctx); err != nil { return err } @@ -342,6 +344,8 @@ func (s *Server) revokeLeadership() { s.stopACLReplication() + s.stopPeeringStreamSync() + s.stopConnectLeader() s.stopACLTokenReaping() @@ -887,7 +891,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent } state := s.fsm.State() - _, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta) + _, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -903,7 +907,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent } // Get the node services, look for ConsulServiceID - _, services, err := state.NodeServices(nil, check.Node, nodeEntMeta) + _, services, err := state.NodeServices(nil, check.Node, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -914,7 +918,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent CHECKS: for _, service := range services.Services { if service.ID == structs.ConsulServiceID { - _, node, err := state.GetNode(check.Node, nodeEntMeta) + _, node, err := state.GetNode(check.Node, nodeEntMeta, check.PeerName) if err != nil { s.logger.Error("Unable to look up node with name", "name", check.Node, "error", err) continue CHECKS @@ -1051,7 +1055,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri // Check if the node exists state := s.fsm.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta) + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1059,7 +1063,7 @@ func (s 
*Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri // Check if the associated service is available if service != nil { match := false - _, services, err := state.NodeServices(nil, member.Name, nodeEntMeta) + _, services, err := state.NodeServices(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1077,7 +1081,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri } // Check if the serfCheck is in the passing state - _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta) + _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1127,7 +1131,7 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.Enterpr // Check if the node exists state := s.fsm.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta) + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1142,7 +1146,7 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.Enterpr if node.Address == member.Addr.String() { // Check if the serfCheck is in the critical state - _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta) + _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1220,7 +1224,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeE // Check if the node does not exist state := s.fsm.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta) + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } diff --git a/agent/consul/leader_federation_state_ae.go b/agent/consul/leader_federation_state_ae.go index ef6f6378f..6cc0d4ba2 100644 --- a/agent/consul/leader_federation_state_ae.go +++ b/agent/consul/leader_federation_state_ae.go @@ -157,7 +157,7 @@ func (s *Server) fetchFederationStateAntiEntropyDetails( // Fetch our current list of all mesh gateways. 
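Throughout these hunks the new trailing argument on catalog reads is a peer name: the empty string (the patch passes the named constant structs.DefaultPeerKeyword for the same purpose) scopes the query to the local cluster, while a non-empty value scopes it to resources imported from that peer. A minimal sketch of the convention from a caller's side (the lookupNode helper is hypothetical; GetNode has the three-return signature shown in the hunks above):

	// lookupNode illustrates the peer-name threading added to catalog reads:
	// "" (or structs.DefaultPeerKeyword) selects a node registered by the
	// local cluster, while a peer name selects the copy imported from that peer.
	func lookupNode(s *state.Store, name, peerName string) (*structs.Node, error) {
		_, node, err := s.GetNode(name, nil, peerName) // nil = default enterprise meta
		return node, err
	}

	local, _ := lookupNode(store, "foo", "")           // locally registered "foo"
	imported, _ := lookupNode(store, "foo", "my-peer") // "foo" imported from peer "my-peer"

The mesh-gateway dump below threads the same keyword through ServiceDump.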
entMeta := structs.WildcardEnterpriseMetaInDefaultPartition() - idx2, raw, err := state.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta) + idx2, raw, err := state.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta, structs.DefaultPeerKeyword) if err != nil { return err } diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go new file mode 100644 index 000000000..d1dfc8c43 --- /dev/null +++ b/agent/consul/leader_peering.go @@ -0,0 +1,244 @@ +package consul + +import ( + "container/ring" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +func (s *Server) startPeeringStreamSync(ctx context.Context) { + s.leaderRoutineManager.Start(ctx, peeringStreamsRoutineName, s.runPeeringSync) +} + +func (s *Server) runPeeringSync(ctx context.Context) error { + logger := s.logger.Named("peering-syncer") + cancelFns := make(map[string]context.CancelFunc) + + retryLoopBackoff(ctx, func() error { + if err := s.syncPeeringsAndBlock(ctx, logger, cancelFns); err != nil { + return err + } + return nil + + }, func(err error) { + s.logger.Error("error syncing peering streams from state store", "error", err) + }) + + return nil +} + +func (s *Server) stopPeeringStreamSync() { + // will be a no-op when not started + s.leaderRoutineManager.Stop(peeringStreamsRoutineName) +} + +// syncPeeringsAndBlock is a long-running goroutine that is responsible for watching +// changes to peerings in the state store and managing streams to those peers. +func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger, cancelFns map[string]context.CancelFunc) error { + state := s.fsm.State() + + // Pull the state store contents and set up to block for changes. + ws := memdb.NewWatchSet() + ws.Add(state.AbandonCh()) + ws.Add(ctx.Done()) + + _, peers, err := state.PeeringList(ws, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier)) + if err != nil { + return err + } + + // TODO(peering) Adjust this debug info. + // Generate a UUID to trace different passes through this function. + seq, err := uuid.GenerateUUID() + if err != nil { + s.logger.Debug("failed to generate sequence uuid while syncing peerings") + } + + logger.Trace("syncing new list of peers", "num_peers", len(peers), "sequence_id", seq) + + // Stored tracks the unique set of peers that should be dialed. + // It is used to reconcile the list of active streams. + stored := make(map[string]struct{}) + + var merr *multierror.Error + + // Create connections and streams to peers in the state store that do not have an active stream. + for _, peer := range peers { + logger.Trace("evaluating stored peer", "peer", peer.Name, "should_dial", peer.ShouldDial(), "sequence_id", seq) + + if !peer.ShouldDial() { + continue + } + + // TODO(peering) Account for deleted peers that are still in the state store + stored[peer.ID] = struct{}{} + + status, found := s.peeringService.StreamStatus(peer.ID) + + // TODO(peering): If there is new peering data and a connected stream, should we tear down the stream? + // If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid. 
+ // Alternatively we could do a basic Ping from the initiate peering endpoint to avoid dealing with that here. + if found && status.Connected { + // Nothing to do when we already have an active stream to the peer. + continue + } + logger.Trace("ensuring stream to peer", "peer_id", peer.ID, "sequence_id", seq) + + if cancel, ok := cancelFns[peer.ID]; ok { + // If the peer is known but we're not connected, clean up the retry-er and start over. + // There may be new data in the state store that would enable us to get out of an error state. + logger.Trace("cancelling context to re-establish stream", "peer_id", peer.ID, "sequence_id", seq) + cancel() + } + + if err := s.establishStream(ctx, logger, peer, cancelFns); err != nil { + // TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs. + // Lockable status isn't available here though. Could report it via the peering.Service? + logger.Error("error establishing peering stream", "peer_id", peer.ID, "error", err) + merr = multierror.Append(merr, err) + + // Continue on errors to avoid one bad peering from blocking the establishment and cleanup of others. + continue + } + } + + logger.Trace("checking connected streams", "streams", s.peeringService.ConnectedStreams(), "sequence_id", seq) + + // Clean up active streams of peerings that were deleted from the state store. + // TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK? + for stream, doneCh := range s.peeringService.ConnectedStreams() { + if _, ok := stored[stream]; ok { + // Active stream is in the state store, nothing to do. + continue + } + + select { + case <-doneCh: + // channel is closed, do nothing to avoid a panic + default: + logger.Trace("tearing down stream for deleted peer", "peer_id", stream, "sequence_id", seq) + close(doneCh) + } + } + + logger.Trace("blocking for changes", "sequence_id", seq) + + // Block for any changes to the state store. + ws.WatchCtx(ctx) + + logger.Trace("unblocked", "sequence_id", seq) + return merr.ErrorOrNil() +} + +func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error { + tlsOption := grpc.WithInsecure() + if len(peer.PeerCAPems) > 0 { + var haveCerts bool + pool := x509.NewCertPool() + for _, pem := range peer.PeerCAPems { + if !pool.AppendCertsFromPEM([]byte(pem)) { + return fmt.Errorf("failed to parse PEM %s", pem) + } + if len(pem) > 0 { + haveCerts = true + } + } + if !haveCerts { + return fmt.Errorf("failed to build cert pool from peer CA pems") + } + cfg := tls.Config{ + ServerName: peer.PeerServerName, + RootCAs: pool, + } + tlsOption = grpc.WithTransportCredentials(credentials.NewTLS(&cfg)) + } + + // Create a ring buffer to cycle through peer addresses in the retry loop below. + buffer := ring.New(len(peer.PeerServerAddresses)) + for _, addr := range peer.PeerServerAddresses { + buffer.Value = addr + buffer = buffer.Next() + } + + logger.Trace("establishing stream to peer", "peer_id", peer.ID) + + retryCtx, cancel := context.WithCancel(ctx) + cancelFns[peer.ID] = cancel + + // Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes. + go retryLoopBackoff(retryCtx, func() error { + // Try a new address on each iteration by advancing the ring buffer on errors. 
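As background on the container/ring mechanics this loop depends on: ring.New(n) allocates a circular list of n slots, and Next() advances one slot with wraparound, so a retry loop that always advances will walk the address list indefinitely. A self-contained sketch using only the standard library (the addresses are made up):

	package main

	import (
		"container/ring"
		"fmt"
	)

	func main() {
		addrs := []string{"10.0.0.1:8300", "10.0.0.2:8300"}
		buf := ring.New(len(addrs))
		for _, a := range addrs {
			buf.Value = a
			buf = buf.Next() // a full pass leaves buf back at the first slot
		}
		for i := 0; i < 3; i++ {
			fmt.Println(buf.Value) // 10.0.0.1:8300, 10.0.0.2:8300, 10.0.0.1:8300
			buf = buf.Next()
		}
	}

The deferred buffer.Next() below performs that advance on every attempt, successful or not, so a failed dial rotates the next retry onto a different server address.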
+ defer func() { + buffer = buffer.Next() + }() + addr, ok := buffer.Value.(string) + if !ok { + return fmt.Errorf("peer server address type %T is not a string", buffer.Value) + } + + logger.Trace("dialing peer", "peer_id", peer.ID, "addr", addr) + conn, err := grpc.DialContext(retryCtx, addr, + grpc.WithContextDialer(newPeerDialer(addr)), + grpc.WithBlock(), + tlsOption, + ) + if err != nil { + return fmt.Errorf("failed to dial: %w", err) + } + defer conn.Close() + + client := pbpeering.NewPeeringServiceClient(conn) + stream, err := client.StreamResources(retryCtx) + if err != nil { + return err + } + + err = s.peeringService.HandleStream(peer.ID, peer.PeerID, stream) + if err == nil { + // This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream. + cancel() + } + return err + + }, func(err error) { + // TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs. + // Lockable status isn't available here though. Could report it via the peering.Service? + logger.Error("error managing peering stream", "peer_id", peer.ID, "error", err) + }) + + return nil +} + +func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (net.Conn, error) { + d := net.Dialer{} + conn, err := d.DialContext(ctx, "tcp", peerAddr) + if err != nil { + return nil, err + } + + // TODO(peering): This is going to need to be revisited. This type uses the TLS settings configured on the agent, but + // for peering we never want mutual TLS because the client peer doesn't share its CA cert. + _, err = conn.Write([]byte{byte(pool.RPCGRPC)}) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil + } +} diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go new file mode 100644 index 000000000..dd79529b3 --- /dev/null +++ b/agent/consul/leader_peering_test.go @@ -0,0 +1,197 @@ +package consul + +import ( + "context" + "encoding/base64" + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/consul/testrpc" +) + +func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // TODO(peering): Configure with TLS + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + // S1 should not 
have a stream tracked for dc2 because s1 generated a token for my-peer-s2, and therefore needs to wait to be dialed. + time.Sleep(1 * time.Second) + _, found := s1.peeringService.StreamStatus(token.PeerID) + require.False(t, found) + + // Bring up s2 and store s1's token so that it attempts to dial. + _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s2.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1. + p := &pbpeering.Peering{ + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + + // We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store. + require.NoError(t, s2.fsm.State().PeeringWrite(1000, p)) + + retry.Run(t, func(r *retry.R) { + status, found := s2.peeringService.StreamStatus(p.ID) + require.True(r, found) + require.True(r, status.Connected) + }) + + // Delete the peering to trigger the termination sequence + require.NoError(t, s2.fsm.State().PeeringDelete(2000, state.Query{ + Value: "my-peer-s1", + })) + s2.logger.Trace("deleted peering for my-peer-s1") + + retry.Run(t, func(r *retry.R) { + _, found := s2.peeringService.StreamStatus(p.ID) + require.False(r, found) + }) + + // s1 should have also marked the peering as terminated. + retry.Run(t, func(r *retry.R) { + _, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{ + Value: "my-peer-s2", + }) + require.NoError(r, err) + require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State) + }) +} + +func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // TODO(peering): Configure with TLS + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + // Bring up s2 and store s1's token so that it attempts to dial. + _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s2.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1.
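The ShouldDial assertions in these tests capture the dialer/acceptor asymmetry of peering: only the cluster that received a token, and therefore stores the remote cluster's PeerServerAddresses, initiates the stream; the cluster that generated the token waits to be dialed. A sketch of that predicate, assuming it keys off the stored addresses (the patch does not include its implementation):

	// shouldDial mirrors the rule the tests rely on: a peering written from a
	// decoded token carries the remote servers' addresses and must dial out;
	// a peering that only generated a token has none and waits to be dialed.
	func shouldDial(p *pbpeering.Peering) bool {
		return len(p.PeerServerAddresses) > 0
	}

Because the write below copies the token's ServerAddresses into the peering, ShouldDial holds and the dc2 leader begins dialing dc1.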
+ p := &pbpeering.Peering{ + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + + // We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store. + require.NoError(t, s2.fsm.State().PeeringWrite(1000, p)) + + retry.Run(t, func(r *retry.R) { + status, found := s2.peeringService.StreamStatus(p.ID) + require.True(r, found) + require.True(r, status.Connected) + }) + + // Delete the peering from the server peer to trigger the termination sequence + require.NoError(t, s1.fsm.State().PeeringDelete(2000, state.Query{ + Value: "my-peer-s2", + })) + s1.logger.Trace("deleted peering for my-peer-s2") + + retry.Run(t, func(r *retry.R) { + _, found := s1.peeringService.StreamStatus(p.PeerID) + require.False(r, found) + }) + + // s2 should have received the termination message and updated the peering state + retry.Run(t, func(r *retry.R) { + _, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{ + Value: "my-peer-s1", + }) + require.NoError(r, err) + require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State) + }) +} diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index cb767acf0..c043fa0f5 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -51,7 +51,7 @@ func TestLeader_RegisterMember(t *testing.T) { // Client should be registered state := s1.fsm.State() retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -61,7 +61,7 @@ func TestLeader_RegisterMember(t *testing.T) { }) // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -80,7 +80,7 @@ func TestLeader_RegisterMember(t *testing.T) { // Server should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s1.config.NodeName, nil) + _, node, err := state.GetNode(s1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -90,7 +90,7 @@ func TestLeader_RegisterMember(t *testing.T) { }) // Service should be registered - _, services, err := state.NodeServices(nil, s1.config.NodeName, nil) + _, services, err := state.NodeServices(nil, s1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -129,7 +129,7 @@ func TestLeader_FailedMember(t *testing.T) { // Should be registered state := s1.fsm.State() retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -139,7 +139,7 @@ func TestLeader_FailedMember(t *testing.T) { }) // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -154,7 +154,7 @@ func TestLeader_FailedMember(t *testing.T) { } retry.Run(t, func(r *retry.R) { - _, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -193,7 +193,7 @@ func TestLeader_LeftMember(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node,
err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(r, err) require.NotNil(r, node, "client not registered") }) @@ -204,7 +204,7 @@ func TestLeader_LeftMember(t *testing.T) { // Should be deregistered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(r, err) require.Nil(r, node, "client still registered") }) @@ -236,7 +236,7 @@ func TestLeader_ReapMember(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(r, err) require.NotNil(r, node, "client not registered") }) @@ -257,7 +257,7 @@ func TestLeader_ReapMember(t *testing.T) { // anti-entropy will put it back. reaped := false for start := time.Now(); time.Since(start) < 5*time.Second; { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(t, err) if node == nil { reaped = true @@ -296,7 +296,7 @@ func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(nodeName, nil) + _, node, err := state.GetNode(nodeName, nil, "") require.NoError(r, err) require.NotNil(r, node, "server not registered") }) @@ -318,7 +318,7 @@ func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) { // anti-entropy will put it back if it did get deleted. reaped := false for start := time.Now(); time.Since(start) < 5*time.Second; { - _, node, err := state.GetNode(nodeName, nil) + _, node, err := state.GetNode(nodeName, nil, "") require.NoError(t, err) if node == nil { reaped = true @@ -402,7 +402,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { } // s3 should be registered retry.Run(t, func(r *retry.R) { - _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta) + _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") if err != nil { r.Fatalf("err: %v", err) } @@ -438,7 +438,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { if err != nil { r.Fatalf("Unexpected error :%v", err) } - _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta) + _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") if err != nil { r.Fatalf("err: %v", err) } @@ -506,7 +506,7 @@ func TestLeader_ReapServer(t *testing.T) { // s3 should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s3.config.NodeName, nil) + _, node, err := state.GetNode(s3.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -527,7 +527,7 @@ func TestLeader_ReapServer(t *testing.T) { } // s3 should be deregistered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s3.config.NodeName, nil) + _, node, err := state.GetNode(s3.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -582,7 +582,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) { // Node should be gone state := s1.fsm.State() - _, node, err := state.GetNode("no-longer-around", nil) + _, node, err := state.GetNode("no-longer-around", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -615,7 +615,7 @@ func TestLeader_Reconcile(t *testing.T) { // Should not be registered state := s1.fsm.State() - _, 
node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -625,7 +625,7 @@ func TestLeader_Reconcile(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -657,7 +657,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { state := s1.fsm.State() var nodeAddr string retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -693,7 +693,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { if err := s1.reconcile(); err != nil { t.Fatalf("err: %v", err) } - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -707,7 +707,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { // Fail the member and wait for the health to go critical. c1.Shutdown() retry.Run(t, func(r *retry.R) { - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -720,7 +720,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { }) // Make sure the metadata didn't get clobbered. - _, node, err = state.GetNode(c1.config.NodeName, nil) + _, node, err = state.GetNode(c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -835,7 +835,7 @@ func TestLeader_LeftLeader(t *testing.T) { // Verify the old leader is deregistered state := remain.fsm.State() retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(leader.config.NodeName, nil) + _, node, err := state.GetNode(leader.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -2336,7 +2336,7 @@ func TestLeader_EnableVirtualIPs(t *testing.T) { }) require.NoError(t, err) - _, node, err := state.NodeService("bar", "tgate1", nil) + _, node, err := state.NodeService("bar", "tgate1", nil, "") require.NoError(t, err) sn := structs.ServiceName{Name: "api"} key := structs.ServiceGatewayVirtualIPTag(sn) diff --git a/agent/consul/peering_backend.go b/agent/consul/peering_backend.go new file mode 100644 index 000000000..7e8c698c8 --- /dev/null +++ b/agent/consul/peering_backend.go @@ -0,0 +1,126 @@ +package consul + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/rpc/peering" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +type peeringBackend struct { + srv *Server + connPool GRPCClientConner + apply *peeringApply +} + +var _ peering.Backend = (*peeringBackend)(nil) + +// NewPeeringBackend returns a peering.Backend implementation that is bound to the given server. +func NewPeeringBackend(srv *Server, connPool GRPCClientConner) peering.Backend { + return &peeringBackend{ + srv: srv, + connPool: connPool, + apply: &peeringApply{srv: srv}, + } +} + +func (b *peeringBackend) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) { + // Only forward the request if the dc in the request matches the server's datacenter. 
+ if info.RequestDatacenter() != "" && info.RequestDatacenter() != b.srv.config.Datacenter { + return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters") + } + return b.srv.ForwardGRPC(b.connPool, info, f) +} + +// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator. +func (b *peeringBackend) GetAgentCACertificates() ([]string, error) { + // TODO(peering): handle empty CA pems + return b.srv.tlsConfigurator.ManualCAPems(), nil +} + +// GetServerAddresses looks up server node addresses from the state store. +func (b *peeringBackend) GetServerAddresses() ([]string, error) { + state := b.srv.fsm.State() + _, nodes, err := state.ServiceNodes(nil, "consul", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword) + if err != nil { + return nil, err + } + var addrs []string + for _, node := range nodes { + addrs = append(addrs, node.Address+":"+strconv.Itoa(node.ServicePort)) + } + return addrs, nil +} + +// GetServerName returns the SNI to be returned in the peering token data which +// will be used by peers when establishing peering connections over TLS. +func (b *peeringBackend) GetServerName() string { + return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "") +} + +// EncodeToken encodes a peering token as a base64-encoded representation of JSON (for now). +func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) { + jsonToken, err := json.Marshal(tok) + if err != nil { + return nil, fmt.Errorf("failed to marshal token: %w", err) + } + return []byte(base64.StdEncoding.EncodeToString(jsonToken)), nil +} + +// DecodeToken decodes a peering token from a base64-encoded JSON byte array (for now). +func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) { + tokJSONRaw, err := base64.StdEncoding.DecodeString(string(tokRaw)) + if err != nil { + return nil, fmt.Errorf("failed to decode token: %w", err) + } + var tok structs.PeeringToken + if err := json.Unmarshal(tokJSONRaw, &tok); err != nil { + return nil, err + } + return &tok, nil +} + +func (b *peeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) { + return b.srv.publisher.Subscribe(req) +} + +func (b *peeringBackend) Store() peering.Store { + return b.srv.fsm.State() +} + +func (b *peeringBackend) Apply() peering.Apply { + return b.apply +} + +func (b *peeringBackend) EnterpriseCheckPartitions(partition string) error { + return b.enterpriseCheckPartitions(partition) +} + +type peeringApply struct { + srv *Server +} + +func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error { + _, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req) + return err +} + +func (a *peeringApply) PeeringDelete(req *pbpeering.PeeringDeleteRequest) error { + _, err := a.srv.raftApplyProtobuf(structs.PeeringDeleteType, req) + return err +} + +// TODO(peering): This needs RPC metrics interceptor since it's not triggered by an RPC.
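Note the shape shared by the peeringApply methods above and below: each wraps its request protobuf in a raft apply tagged with a structs.*Type message type, so the mutation is committed through the raft log and applied by every server's FSM rather than written into one state store directly. A sketch of the write path from a caller's side (hedged: the backend variable and error wrapping are illustrative, and only the Peering field of PeeringWriteRequest is shown):

	// Sketch: persisting a peering through peering.Apply so the change is
	// replicated via raft instead of mutating local state.
	req := &pbpeering.PeeringWriteRequest{
		Peering: &pbpeering.Peering{Name: "my-peer-s2"},
	}
	if err := backend.Apply().PeeringWrite(req); err != nil {
		return fmt.Errorf("failed to write peering: %w", err)
	}

The terminate apply that follows is the same pattern with a different message type.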
+func (a *peeringApply) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error { + _, err := a.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req) + return err +} + +var _ peering.Apply = (*peeringApply)(nil) diff --git a/agent/consul/peering_backend_oss.go b/agent/consul/peering_backend_oss.go new file mode 100644 index 000000000..5f5a117db --- /dev/null +++ b/agent/consul/peering_backend_oss.go @@ -0,0 +1,15 @@ +//go:build !consulent +// +build !consulent + +package consul + +import ( + "fmt" +) + +func (b *peeringBackend) enterpriseCheckPartitions(partition string) error { + if partition != "" { + return fmt.Errorf("Partitions are a Consul Enterprise feature") + } + return nil +} diff --git a/agent/consul/peering_backend_oss_test.go b/agent/consul/peering_backend_oss_test.go new file mode 100644 index 000000000..75decc0a8 --- /dev/null +++ b/agent/consul/peering_backend_oss_test.go @@ -0,0 +1,51 @@ +//go:build !consulent +// +build !consulent + +package consul + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + gogrpc "google.golang.org/grpc" + + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +func TestPeeringBackend_RejectsPartition(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + _, s1 := testServerWithConfig(t, func(c *Config) { + c.Datacenter = "dc1" + c.Bootstrap = true + }) + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // make a grpc client to dial s1 directly + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + t.Cleanup(cancel) + + conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + Datacenter: "dc1", + Partition: "test", + } + _, err = peeringClient.GenerateToken(ctx, &req) + require.Error(t, err) + require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") +} diff --git a/agent/consul/peering_backend_test.go b/agent/consul/peering_backend_test.go new file mode 100644 index 000000000..eb89cd531 --- /dev/null +++ b/agent/consul/peering_backend_test.go @@ -0,0 +1,115 @@ +package consul + +import ( + "context" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + gogrpc "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +func TestPeeringBackend_DoesNotForwardToDifferentDC(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + _, s1 := testServerDC(t, "dc1") + _, s2 := testServerDC(t, "dc2") + + joinWAN(t, s2, s1) + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // make a grpc client to dial s2 directly + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := gogrpc.DialContext(ctx, s2.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + // GenerateToken request should fail against dc1, because we are 
dialing dc2. The GenerateToken request should never be forwarded across datacenters. + req := pbpeering.GenerateTokenRequest{ + PeerName: "peer1-usw1", + Datacenter: "dc1", + } + _, err = peeringClient.GenerateToken(ctx, &req) + require.Error(t, err) + require.Contains(t, err.Error(), "requests to generate peering tokens cannot be forwarded to remote datacenters") +} + +func TestPeeringBackend_ForwardToLeader(t *testing.T) { + t.Parallel() + + _, conf1 := testServerConfig(t) + server1, err := newServer(t, conf1) + require.NoError(t, err) + + _, conf2 := testServerConfig(t) + conf2.Bootstrap = false + server2, err := newServer(t, conf2) + require.NoError(t, err) + + // Join a 2nd server (not the leader) + testrpc.WaitForLeader(t, server1.RPC, "dc1") + joinLAN(t, server2, server1) + testrpc.WaitForLeader(t, server2.RPC, "dc1") + + // Make a write call to server2 and make sure it gets forwarded to server1 + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + // Dial server2 directly + conn, err := gogrpc.DialContext(ctx, server2.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(server2.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + runStep(t, "forward a write", func(t *testing.T) { + // Do the grpc Write call to server2 + req := pbpeering.GenerateTokenRequest{ + Datacenter: "dc1", + PeerName: "foo", + } + _, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + // TODO(peering) check that state store is updated on leader, indicating a forwarded request after state store + // is implemented. + }) +} + +func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (net.Conn, error) { + d := net.Dialer{} + conn, err := d.DialContext(ctx, "tcp", serverAddr) + if err != nil { + return nil, err + } + + _, err = conn.Write([]byte{byte(pool.RPCGRPC)}) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil + } +} diff --git a/agent/consul/prepared_query/walk_test.go b/agent/consul/prepared_query/walk_test.go index 2c6920afd..e45aa3a1e 100644 --- a/agent/consul/prepared_query/walk_test.go +++ b/agent/consul/prepared_query/walk_test.go @@ -3,12 +3,12 @@ package prepared_query import ( "fmt" "reflect" + "sort" "testing" - "sort" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/structs" - "github.com/stretchr/testify/require" ) func TestWalk_ServiceQuery(t *testing.T) { @@ -42,6 +42,7 @@ func TestWalk_ServiceQuery(t *testing.T) { ".Tags[0]:tag1", ".Tags[1]:tag2", ".Tags[2]:tag3", + ".PeerName:", } expected = append(expected, entMetaWalkFields...) 
sort.Strings(expected) diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 15f818171..31890449c 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -404,7 +404,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, qs.Node = args.Agent.Node } else if qs.Node == "_ip" { if args.Source.Ip != "" { - _, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition()) + _, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition(), structs.TODOPeerKeyword) if err != nil { return err } @@ -534,7 +534,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery, f = state.CheckConnectServiceNodes } - _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta) + _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta, query.Service.PeerName) if err != nil { return err } diff --git a/agent/consul/server.go b/agent/consul/server.go index 163b1fe38..6147df3bd 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -16,24 +16,20 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/consul/agent/rpc/middleware" - - "github.com/hashicorp/go-version" - "go.etcd.io/bbolt" - "github.com/armon/go-metrics" + "github.com/hashicorp/consul-net-rpc/net/rpc" connlimit "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-version" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" raftboltdb "github.com/hashicorp/raft-boltdb/v2" "github.com/hashicorp/serf/serf" + "go.etcd.io/bbolt" "golang.org/x/time/rate" "google.golang.org/grpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" @@ -50,11 +46,14 @@ import ( "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" + "github.com/hashicorp/consul/agent/rpc/peering" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/routine" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" @@ -124,6 +123,7 @@ const ( intermediateCertRenewWatchRoutineName = "intermediate cert renew watch" backgroundCAInitializationRoutineName = "CA initialization" virtualIPCheckRoutineName = "virtual IP version check" + peeringStreamsRoutineName = "streaming peering resources" ) var ( @@ -356,6 +356,9 @@ type Server struct { // this into the Deps struct and created it much earlier on. publisher *stream.EventPublisher + // peering is a service used to handle peering streams. 
+ peeringService *peering.Service + // embedded struct to hold all the enterprise specific data EnterpriseServer } @@ -730,12 +733,19 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve } func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler { + p := peering.NewService( + deps.Logger.Named("grpc-api.peering"), + NewPeeringBackend(s, deps.GRPCConnPool), + ) + s.peeringService = p + register := func(srv *grpc.Server) { if config.RPCConfig.EnableStreaming { pbsubscribe.RegisterStateChangeSubscriptionServer(srv, subscribe.NewServer( &subscribeBackend{srv: s, connPool: deps.GRPCConnPool}, deps.Logger.Named("grpc-api.subscription"))) } + pbpeering.RegisterPeeringServiceServer(srv, s.peeringService) s.registerEnterpriseGRPCServices(deps, srv) // Note: this public gRPC service is also exposed on the private server to @@ -783,7 +793,7 @@ func (s *Server) setupRaft() error { }() var serverAddressProvider raft.ServerAddressProvider = nil - if s.config.RaftConfig.ProtocolVersion >= 3 { //ServerAddressProvider needs server ids to work correctly, which is only supported in protocol version 3 or higher + if s.config.RaftConfig.ProtocolVersion >= 3 { // ServerAddressProvider needs server ids to work correctly, which is only supported in protocol version 3 or higher serverAddressProvider = s.serverLookup } diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 31972b5cc..7b0dafb2b 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -237,6 +237,8 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S r.Fatalf("err: %v", err) } }) + t.Cleanup(func() { srv.Shutdown() }) + return dir, srv } diff --git a/agent/consul/state/acl_schema.go b/agent/consul/state/acl_schema.go index f2b77dcbf..5b9529bbd 100644 --- a/agent/consul/state/acl_schema.go +++ b/agent/consul/state/acl_schema.go @@ -239,6 +239,26 @@ func prefixIndexFromUUIDQuery(arg interface{}) ([]byte, error) { return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) } +func prefixIndexFromUUIDWithPeerQuery(arg interface{}) ([]byte, error) { + switch v := arg.(type) { + case Query: + var b indexBuilder + peername := v.PeerOrEmpty() + if peername == "" { + b.String(structs.LocalPeerKeyword) + } else { + b.String(strings.ToLower(peername)) + } + uuidBytes, err := variableLengthUUIDStringToBytes(v.Value) + if err != nil { + return nil, err + } + return append(b.Bytes(), uuidBytes...), nil + } + + return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) +} + func multiIndexPolicyFromACLRole(raw interface{}) ([][]byte, error) { role, ok := raw.(*structs.ACLRole) if !ok { diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 052132018..32b363355 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -59,7 +59,7 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) { // Services is used to pull the full list of services for a given node for use // during snapshots. 
-func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { +func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.NodeEnterpriseMetaInDefaultPartition() @@ -67,12 +67,13 @@ func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta) (memdb.Res return s.tx.Get(tableServices, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) } // Checks is used to pull the full list of checks for a given node for use // during snapshots. -func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { +func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.NodeEnterpriseMetaInDefaultPartition() @@ -80,6 +81,7 @@ func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta) (memdb.Resul return s.tx.Get(tableChecks, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) } @@ -136,8 +138,12 @@ func (s *Store) ensureCheckIfNodeMatches( preserveIndexes bool, node string, nodePartition string, + nodePeerName string, check *structs.HealthCheck, ) error { + if !strings.EqualFold(check.PeerName, nodePeerName) { + return fmt.Errorf("check peer name %q does not match node peer name %q", check.PeerName, nodePeerName) + } if !strings.EqualFold(check.Node, node) || !acl.EqualPartitions(nodePartition, check.PartitionOrDefault()) { return fmt.Errorf("check node %q does not match node %q", printNodeName(check.Node, check.PartitionOrDefault()), @@ -161,6 +167,9 @@ func printNodeName(nodeName, partition string) string { // registration is performed within a single transaction to avoid race // conditions on state updates. func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes bool, req *structs.RegisterRequest, restore bool) error { + if err := validateRegisterRequestPeerNamesTxn(tx, req, restore); err != nil { + return err + } if _, err := validateRegisterRequestTxn(tx, req, restore); err != nil { return err } @@ -174,6 +183,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b Partition: req.PartitionOrDefault(), TaggedAddresses: req.TaggedAddresses, Meta: req.NodeMeta, + PeerName: req.PeerName, } if preserveIndexes { node.CreateIndex = req.CreateIndex @@ -189,6 +199,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b existing, err := tx.First(tableNodes, indexID, Query{ Value: node.Node, EnterpriseMeta: *node.GetEnterpriseMeta(), + PeerName: node.PeerName, }) if err != nil { return fmt.Errorf("node lookup failed: %s", err) @@ -208,6 +219,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b EnterpriseMeta: req.Service.EnterpriseMeta, Node: req.Node, Service: req.Service.ID, + PeerName: req.PeerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -222,12 +234,14 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b // Add the checks, if any. 
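The new guard in ensureCheckIfNodeMatches above extends the existing node-name validation: a check may only be attached to a node registered under the same peer, compared case-insensitively just like the node name. A standalone sketch of the combined check, with a simplified signature:

package main

import (
	"fmt"
	"strings"
)

// checkMatchesNode condenses the two guards in ensureCheckIfNodeMatches:
// the check must target the same node and the same peer, both compared
// case-insensitively.
func checkMatchesNode(checkNode, checkPeer, node, nodePeer string) error {
	if !strings.EqualFold(checkPeer, nodePeer) {
		return fmt.Errorf("check peer name %q does not match node peer name %q", checkPeer, nodePeer)
	}
	if !strings.EqualFold(checkNode, node) {
		return fmt.Errorf("check node %q does not match node %q", checkNode, node)
	}
	return nil
}

func main() {
	fmt.Println(checkMatchesNode("web-1", "peer1", "web-1", "")) // peer mismatch
	fmt.Println(checkMatchesNode("web-1", "", "web-1", ""))      // <nil>
}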
if req.Check != nil { - if err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), req.Check); err != nil { + err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), req.PeerName, req.Check) + if err != nil { return err } } for _, check := range req.Checks { - if err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), check); err != nil { + err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), req.PeerName, check) + if err != nil { return err } } @@ -235,6 +249,61 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b return nil } +func validateRegisterRequestPeerNamesTxn(_ ReadTxn, args *structs.RegisterRequest, _ bool) error { + var ( + peerNames = make(map[string]struct{}) + ) + if args.Service != nil { + if args.Service.PeerName == "" { + args.Service.PeerName = args.PeerName + } + + peerName := args.Service.PeerName + // TODO(peering): validate the peering exists (skip check on restore) + + peerNames[peerName] = struct{}{} + } + + validateCheck := func(chk *structs.HealthCheck) error { + if chk.PeerName == "" { + chk.PeerName = args.PeerName + } + + peerName := chk.PeerName + // TODO(peering): validate the peering exists (skip check on restore) + + peerNames[peerName] = struct{}{} + + return nil + } + + if args.Check != nil { + if err := validateCheck(args.Check); err != nil { + return err + } + } + for _, chk := range args.Checks { + if err := validateCheck(chk); err != nil { + return err + } + } + + { + // TODO(peering): validate the node's peering exists (skip check on restore) + peerName := args.PeerName + peerNames[peerName] = struct{}{} + } + + if len(peerNames) > 1 { + return fmt.Errorf("Cannot register services and checks for multiple peer names in one registration request") + } else if len(peerNames) == 0 { + return fmt.Errorf("No peer names are present on the registration request") + } + + return nil +} + // EnsureNode is used to upsert node registration or modification. func (s *Store) EnsureNode(idx uint64, node *structs.Node) error { tx := s.db.WriteTxn(idx) @@ -252,8 +321,11 @@ func (s *Store) EnsureNode(idx uint64, node *structs.Node) error { // If allowClashWithoutID then, getting a conflict on another node without ID will be allowed func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWithoutID bool) error { // Retrieve all of the nodes - - enodes, err := tx.Get(tableNodes, indexID+"_prefix", node.GetEnterpriseMeta()) + q := Query{ + PeerName: node.PeerName, + EnterpriseMeta: *node.GetEnterpriseMeta(), + } + enodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return fmt.Errorf("Cannot lookup all nodes: %s", err) } @@ -266,6 +338,7 @@ func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWi EnterpriseMeta: *node.GetEnterpriseMeta(), Node: enode.Node, CheckID: string(structs.SerfCheckID), + PeerName: enode.PeerName, }) if err != nil { return fmt.Errorf("Cannot get status of node %s: %s", enode.Node, err) } @@ -293,7 +366,7 @@ func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWi // Returns a bool indicating if a write happened and any error. func (s *Store) ensureNodeCASTxn(tx WriteTxn, idx uint64, node *structs.Node) (bool, error) { // Retrieve the existing entry.
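validateRegisterRequestPeerNamesTxn above enforces a single invariant: after defaulting every empty per-entity peer name to the request's, a registration may touch exactly one peer. A compact sketch of that rule, with simplified stand-in types:

package main

import (
	"errors"
	"fmt"
)

// registration is a simplified stand-in for structs.RegisterRequest.
type registration struct {
	PeerName   string
	CheckPeers []string // peer names carried by the request's checks
}

// validateSinglePeer mirrors the rule in validateRegisterRequestPeerNamesTxn:
// default empty peer names to the request's, then require exactly one
// distinct peer across node, service, and checks.
func validateSinglePeer(req *registration) error {
	peers := map[string]struct{}{req.PeerName: {}}
	for i, p := range req.CheckPeers {
		if p == "" {
			p = req.PeerName // default to the request's peer
			req.CheckPeers[i] = p
		}
		peers[p] = struct{}{}
	}
	if len(peers) > 1 {
		return errors.New("cannot register services and checks for multiple peer names in one request")
	}
	return nil
}

func main() {
	fmt.Println(validateSinglePeer(&registration{PeerName: "peer1", CheckPeers: []string{"", "peer2"}}))
}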
- existing, err := getNodeTxn(tx, node.Node, node.GetEnterpriseMeta()) + existing, err := getNodeTxn(tx, node.Node, node.GetEnterpriseMeta(), node.PeerName) if err != nil { return false, err } @@ -326,7 +399,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod // name is the same. var n *structs.Node if node.ID != "" { - existing, err := getNodeIDTxn(tx, node.ID, node.GetEnterpriseMeta()) + existing, err := getNodeIDTxn(tx, node.ID, node.GetEnterpriseMeta(), node.PeerName) if err != nil { return fmt.Errorf("node lookup failed: %s", err) } @@ -339,7 +412,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod return fmt.Errorf("Error while renaming Node ID: %q (%s): %s", node.ID, node.Address, dupNameError) } // We are actually renaming a node, remove its reference first - err := s.deleteNodeTxn(tx, idx, n.Node, n.GetEnterpriseMeta()) + err := s.deleteNodeTxn(tx, idx, n.Node, n.GetEnterpriseMeta(), n.PeerName) if err != nil { return fmt.Errorf("Error while renaming Node ID: %q (%s) from %s to %s", node.ID, node.Address, n.Node, node.Node) @@ -362,6 +435,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod existing, err := tx.First(tableNodes, indexID, Query{ Value: node.Node, EnterpriseMeta: *node.GetEnterpriseMeta(), + PeerName: node.PeerName, }) if err != nil { return fmt.Errorf("node name lookup failed: %s", err) @@ -398,7 +472,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod } // GetNode is used to retrieve a node registration by node name ID. -func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.Node, error) { +func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.Node, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -408,20 +482,21 @@ func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint6 } // Get the table index. - idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) // Retrieve the node from the state store - node, err := getNodeTxn(tx, nodeNameOrID, entMeta) + node, err := getNodeTxn(tx, nodeNameOrID, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("node lookup failed: %s", err) } return idx, node, nil } -func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (*structs.Node, error) { +func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, error) { node, err := tx.First(tableNodes, indexID, Query{ Value: nodeNameOrID, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, fmt.Errorf("node lookup failed: %s", err) @@ -432,10 +507,11 @@ func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (* return nil, nil } -func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta) (*structs.Node, error) { +func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, error) { node, err := tx.First(tableNodes, indexUUID+"_prefix", Query{ Value: string(id), EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, fmt.Errorf("node lookup by ID failed: %s", err) @@ -447,7 +523,7 @@ func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta) (*st } // GetNodeID is used to retrieve a node registration by node ID. 
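From getNodeTxn onward, nearly every memdb lookup swaps a bare value (or bare entMeta) for a Query struct carrying Value, EnterpriseMeta, and PeerName, so a lookup cannot accidentally omit the peer dimension. A toy illustration of the struct-as-composite-key idea; the key layout and "internal" keyword here are invented for the example:

package main

import "fmt"

// query mirrors the shape of state.Query: one struct carries every scoping
// dimension, so call sites must spell out the peer explicitly.
type query struct {
	Value     string
	Partition string
	PeerName  string
}

func (q query) key() string {
	peer := q.PeerName
	if peer == "" {
		peer = "internal" // assumed stand-in for structs.LocalPeerKeyword
	}
	return peer + "/" + q.Partition + "/" + q.Value
}

func main() {
	nodes := map[string]string{
		query{Value: "web-1", Partition: "default"}.key():                    "10.0.0.1",
		query{Value: "web-1", Partition: "default", PeerName: "peer1"}.key(): "10.8.0.1",
	}
	fmt.Println(nodes[query{Value: "web-1", Partition: "default"}.key()])
	fmt.Println(nodes[query{Value: "web-1", Partition: "default", PeerName: "peer1"}.key()])
}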
-func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta) (uint64, *structs.Node, error) { +func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.Node, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -456,16 +532,15 @@ func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta) (uint64, entMeta = structs.NodeEnterpriseMetaInDefaultPartition() } - // Get the table index. - idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) // Retrieve the node from the state store - node, err := getNodeIDTxn(tx, id, entMeta) + node, err := getNodeIDTxn(tx, id, entMeta, peerName) return idx, node, err } // Nodes is used to return all of the known nodes. -func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Nodes, error) { +func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Nodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -474,11 +549,14 @@ func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, s entMeta = structs.NodeEnterpriseMetaInDefaultPartition() } - // Get the table index. - idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) // Retrieve all of the nodes - nodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + PeerName: peerName, + EnterpriseMeta: *entMeta, + } + nodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -493,7 +571,7 @@ func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, s } // NodesByMeta is used to return all nodes with the given metadata key/value pairs. -func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.Nodes, error) { +func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Nodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -502,8 +580,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet entMeta = structs.NodeEnterpriseMetaInDefaultPartition() } - // Get the table index. - idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) if len(filters) == 0 { return idx, nil, nil // NodesByMeta is never called with an empty map, but just in case make it return no results. @@ -521,6 +598,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet Key: firstKey, Value: firstValue, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) @@ -539,7 +617,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet } // DeleteNode is used to delete a given node by its ID. -func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -549,7 +627,7 @@ func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseM } // Call the node deletion. 
- if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta); err != nil { + if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta, peerName); err != nil { return err } @@ -559,9 +637,9 @@ func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseM // deleteNodeCASTxn is used to try doing a node delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. -func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, entMeta *acl.EnterpriseMeta) (bool, error) { +func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) (bool, error) { // Look up the node. - node, err := getNodeTxn(tx, nodeName, entMeta) + node, err := getNodeTxn(tx, nodeName, entMeta, peerName) if err != nil { return false, err } @@ -577,7 +655,7 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, } // Call the actual deletion if the above passed. - if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta); err != nil { + if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta, peerName); err != nil { return false, err } @@ -586,21 +664,22 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, // deleteNodeTxn is the inner method used for removing a node from // the store within a given transaction. -func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) error { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } // Look up the node. - node, err := tx.First(tableNodes, indexID, Query{ + nodeRaw, err := tx.First(tableNodes, indexID, Query{ Value: nodeName, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return fmt.Errorf("node lookup failed: %s", err) } - if node == nil { + if nodeRaw == nil { return nil } @@ -608,6 +687,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta services, err := tx.Get(tableServices, indexNode, Query{ Value: nodeName, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -617,17 +697,17 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta svc := service.(*structs.ServiceNode) deleteServices = append(deleteServices, svc) - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } // Do the delete in a separate loop so we don't trash the iterator. 
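deleteNodeCASTxn above keeps the store's compare-and-set contract while gaining the peer argument: the delete proceeds only when the caller's index matches the node's last ModifyIndex, and is otherwise a no-op. In sketch form, with simplified types:

package main

import "fmt"

type node struct {
	Name        string
	ModifyIndex uint64
}

type store struct{ nodes map[string]*node }

// deleteNodeCAS removes a node only if cidx matches the node's last
// observed ModifyIndex; otherwise it is a no-op, mirroring deleteNodeCASTxn.
func (s *store) deleteNodeCAS(cidx uint64, name string) bool {
	n, ok := s.nodes[name]
	if !ok || n.ModifyIndex != cidx {
		return false // stale index: caller must re-read and retry
	}
	delete(s.nodes, name)
	return true
}

func main() {
	s := &store{nodes: map[string]*node{"web-1": {Name: "web-1", ModifyIndex: 7}}}
	fmt.Println(s.deleteNodeCAS(5, "web-1")) // false: comparison failed
	fmt.Println(s.deleteNodeCAS(7, "web-1")) // true: deleted
}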
for _, svc := range deleteServices { - if err := s.deleteServiceTxn(tx, idx, nodeName, svc.ServiceID, &svc.EnterpriseMeta); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, svc.ServiceID, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } @@ -637,6 +717,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta checks, err := tx.Get(tableChecks, indexNode, Query{ Value: nodeName, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return fmt.Errorf("failed check lookup: %s", err) @@ -648,46 +729,52 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta // Do the delete in a separate loop so we don't trash the iterator. for _, chk := range deleteChecks { - if err := s.deleteCheckTxn(tx, idx, nodeName, chk.CheckID, &chk.EnterpriseMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, nodeName, chk.CheckID, &chk.EnterpriseMeta, chk.PeerName); err != nil { return err } } - // Delete any coordinates associated with this node. - coords, err := tx.Get(tableCoordinates, indexNode, Query{ - Value: nodeName, - EnterpriseMeta: *entMeta, - }) - if err != nil { - return fmt.Errorf("failed coordinate lookup: %s", err) - } - var coordsToDelete []*structs.Coordinate - for coord := coords.Next(); coord != nil; coord = coords.Next() { - coordsToDelete = append(coordsToDelete, coord.(*structs.Coordinate)) - } - for _, coord := range coordsToDelete { - if err := deleteCoordinateTxn(tx, idx, coord); err != nil { - return fmt.Errorf("failed deleting coordinate: %s", err) + if peerName == "" { + // Delete any coordinates associated with this node. + coords, err := tx.Get(tableCoordinates, indexNode, Query{ + Value: nodeName, + EnterpriseMeta: *entMeta, + PeerName: structs.DefaultPeerKeyword, + }) + if err != nil { + return fmt.Errorf("failed coordinate lookup: %s", err) + } + var coordsToDelete []*structs.Coordinate + for coord := coords.Next(); coord != nil; coord = coords.Next() { + coordsToDelete = append(coordsToDelete, coord.(*structs.Coordinate)) + } + for _, coord := range coordsToDelete { + if err := deleteCoordinateTxn(tx, idx, coord); err != nil { + return fmt.Errorf("failed deleting coordinate: %s", err) + } } } // Delete the node and update the index. - if err := tx.Delete(tableNodes, node); err != nil { + if err := tx.Delete(tableNodes, nodeRaw); err != nil { return fmt.Errorf("failed deleting node: %s", err) } - if err := catalogUpdateNodesIndexes(tx, idx, entMeta); err != nil { + node := nodeRaw.(*structs.Node) + if err := catalogUpdateNodesIndexes(tx, idx, entMeta, node.PeerName); err != nil { return fmt.Errorf("failed updating index: %s", err) } - // Invalidate any sessions for this node. - toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault()) - if err != nil { - return err - } + if peerName == "" { + // Invalidate any sessions for this node. 
+ toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault()) + if err != nil { + return err + } - for _, session := range toDelete { - if err := s.deleteSessionTxn(tx, idx, session.ID, &session.EnterpriseMeta); err != nil { - return fmt.Errorf("failed to delete session '%s': %v", session.ID, err) + for _, session := range toDelete { + if err := s.deleteSessionTxn(tx, idx, session.ID, &session.EnterpriseMeta); err != nil { + return fmt.Errorf("failed to delete session '%s': %v", session.ID, err) + } } } @@ -713,7 +800,13 @@ var errCASCompareFailed = errors.New("compare-and-set: comparison failed") // Returns an error if the write didn't happen and nil if write was successful. func ensureServiceCASTxn(tx WriteTxn, idx uint64, node string, svc *structs.NodeService) error { // Retrieve the existing service. - existing, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: svc.EnterpriseMeta, Node: node, Service: svc.ID}) + existing, err := tx.First(tableServices, indexID, + NodeServiceQuery{ + EnterpriseMeta: svc.EnterpriseMeta, + Node: node, + Service: svc.ID, + PeerName: svc.PeerName, + }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -742,6 +835,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool EnterpriseMeta: svc.EnterpriseMeta, Node: node, Service: svc.ID, + PeerName: svc.PeerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -751,9 +845,11 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool return fmt.Errorf("Invalid Service Meta for node %s and serviceID %s: %v", node, svc.ID, err) } - // Check if this service is covered by a gateway's wildcard specifier - if err = checkGatewayWildcardsAndUpdate(tx, idx, svc); err != nil { - return fmt.Errorf("failed updating gateway mapping: %s", err) + if svc.PeerName == "" { + // Check if this service is covered by a gateway's wildcard specifier + if err = checkGatewayWildcardsAndUpdate(tx, idx, svc); err != nil { + return fmt.Errorf("failed updating gateway mapping: %s", err) + } } if err := upsertKindServiceName(tx, idx, svc.Kind, svc.CompoundServiceName()); err != nil { return fmt.Errorf("failed to persist service name: %v", err) @@ -789,28 +885,30 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool } } - // If there's a terminating gateway config entry for this service, populate the tagged addresses - // with virtual IP mappings. - termGatewayVIPsSupported, err := terminatingGatewayVirtualIPsSupported(tx, nil) - if err != nil { - return err - } - if termGatewayVIPsSupported && svc.Kind == structs.ServiceKindTerminatingGateway { - _, conf, err := configEntryTxn(tx, nil, structs.TerminatingGateway, svc.Service, &svc.EnterpriseMeta) + if svc.PeerName == "" { + // If there's a terminating gateway config entry for this service, populate the tagged addresses + // with virtual IP mappings. 
+ termGatewayVIPsSupported, err := terminatingGatewayVirtualIPsSupported(tx, nil) if err != nil { - return fmt.Errorf("failed to retrieve terminating gateway config: %s", err) + return err } - if conf != nil { - termGatewayConf := conf.(*structs.TerminatingGatewayConfigEntry) - addrs, err := getTermGatewayVirtualIPs(tx, termGatewayConf.Services, &svc.EnterpriseMeta) + if termGatewayVIPsSupported && svc.Kind == structs.ServiceKindTerminatingGateway { + _, conf, err := configEntryTxn(tx, nil, structs.TerminatingGateway, svc.Service, &svc.EnterpriseMeta) if err != nil { - return err + return fmt.Errorf("failed to retrieve terminating gateway config: %s", err) } - if svc.TaggedAddresses == nil { - svc.TaggedAddresses = make(map[string]structs.ServiceAddress) - } - for key, addr := range addrs { - svc.TaggedAddresses[key] = addr + if conf != nil { + termGatewayConf := conf.(*structs.TerminatingGatewayConfigEntry) + addrs, err := getTermGatewayVirtualIPs(tx, termGatewayConf.Services, &svc.EnterpriseMeta) + if err != nil { + return err + } + if svc.TaggedAddresses == nil { + svc.TaggedAddresses = make(map[string]structs.ServiceAddress) + } + for key, addr := range addrs { + svc.TaggedAddresses[key] = addr + } } } } @@ -823,6 +921,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool n, err := tx.First(tableNodes, indexID, Query{ Value: node, EnterpriseMeta: svc.EnterpriseMeta, + PeerName: svc.PeerName, }) if err != nil { return fmt.Errorf("failed node lookup: %s", err) @@ -856,6 +955,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool // assignServiceVirtualIP assigns a virtual IP to the target service and updates // the global virtual IP counter if necessary. func assignServiceVirtualIP(tx WriteTxn, sn structs.ServiceName) (string, error) { + // TODO(peering): support VIPs serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn) if err != nil { return "", fmt.Errorf("failed service virtual IP lookup: %s", err) @@ -980,15 +1080,15 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, } // Services returns all services along with a list of associated tags. -func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Services, error) { +func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := catalogServicesMaxIndex(tx, entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) // List all the services. 
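A pattern recurs through the deleteNodeTxn and ensureServiceTxn hunks above: catalog bookkeeping always runs, but subsystems that only exist locally (coordinates, sessions, gateway wildcard mappings, terminating-gateway virtual IPs) are skipped when peerName is non-empty. The shape of that guard, reduced to a toy:

package main

import "fmt"

// deregister sketches the guard pattern: catalog bookkeeping always runs,
// but local-only subsystems are skipped for records imported from a peer.
func deregister(nodeName, peerName string) {
	fmt.Println("deleting services and checks for", nodeName)

	if peerName == "" {
		// Coordinates and sessions only exist for local nodes, so this
		// cleanup is skipped for imported (peered) registrations.
		fmt.Println("deleting coordinates and sessions for", nodeName)
	}
}

func main() {
	deregister("web-1", "")      // local node: full cleanup
	deregister("web-2", "peer1") // imported node: catalog-only cleanup
}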
- services, err := catalogServiceListNoWildcard(tx, entMeta) + services, err := catalogServiceListNoWildcard(tx, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -1020,17 +1120,24 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64 return idx, results, nil } -func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) { tx := s.db.Txn(false) defer tx.Abort() - return serviceListTxn(tx, ws, entMeta) + return serviceListTxn(tx, ws, entMeta, peerName) } -func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceList, error) { - idx := catalogServicesMaxIndex(tx, entMeta) +func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) { + if entMeta == nil { + entMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } - services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) + + services, err := tx.Get(tableServices, indexID+"_prefix", Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -1051,7 +1158,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) } // ServicesByNodeMeta returns all services, filtered by the given node metadata. -func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.Services, error) { +func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1061,8 +1168,9 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, } // Get the table index. - idx := catalogServicesMaxIndex(tx, entMeta) - if nodeIdx := catalogNodesMaxIndex(tx, entMeta); nodeIdx > idx { + idx := catalogServicesMaxIndex(tx, entMeta, peerName) + + if nodeIdx := catalogNodesMaxIndex(tx, entMeta, peerName); nodeIdx > idx { idx = nodeIdx } @@ -1082,6 +1190,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, Key: firstKey, Value: firstValue, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) @@ -1090,7 +1199,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, // We don't want to track an unlimited number of services, so we pull a // top-level watch to use as a fallback. 
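serviceListTxn and its siblings above now read their blocking-query index from a table index scoped by partition and peer rather than a single catalog-wide entry. A sketch of that per-scope bookkeeping; indexKey and the "internal" keyword are assumptions for the example:

package main

import "fmt"

// indexes maps a scope key to the highest raft index that modified it,
// mirroring the per-(partition, peer) entries behind catalog*MaxIndex.
var indexes = map[string]uint64{}

func indexKey(table, partition, peerName string) string {
	if peerName == "" {
		peerName = "internal" // assumed stand-in for structs.LocalPeerKeyword
	}
	return table + "." + partition + "." + peerName
}

func bumpIndex(table, partition, peerName string, idx uint64) {
	indexes[indexKey(table, partition, peerName)] = idx
}

// maxIndex is what a blocking query returns so clients can wait for changes.
func maxIndex(table, partition, peerName string) uint64 {
	return indexes[indexKey(table, partition, peerName)]
}

func main() {
	bumpIndex("services", "default", "", 10)
	bumpIndex("services", "default", "peer1", 12)
	fmt.Println(maxIndex("services", "default", ""))      // 10
	fmt.Println(maxIndex("services", "default", "peer1")) // 12
}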
- allServices, err := catalogServiceListNoWildcard(tx, entMeta) + allServices, err := catalogServiceListNoWildcard(tx, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -1105,7 +1214,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, } // List all the services on the node - services, err := catalogServiceListByNode(tx, n.Node, entMeta, false) + services, err := catalogServiceListByNode(tx, n.Node, entMeta, n.PeerName, false) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -1146,8 +1255,8 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, // * return when the last instance of a service is removed // * block until an instance for this service is available, or another // service is unregistered. -func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta) uint64 { - idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta) +func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta, peerName string) uint64 { + idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta, peerName) return idx } @@ -1165,20 +1274,20 @@ func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bo // returned for the chan. This allows for blocking watchers to _only_ watch this // one chan in the common case, falling back to watching all touched MemDB // indexes in more complicated cases. -func maxIndexAndWatchChForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta) (uint64, <-chan struct{}) { +func maxIndexAndWatchChForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, <-chan struct{}) { if !serviceExists { - res, err := catalogServiceLastExtinctionIndex(tx, entMeta) + res, err := catalogServiceLastExtinctionIndex(tx, entMeta, peerName) if missingIdx, ok := res.(*IndexEntry); ok && err == nil { // Note safe to only watch the extinction index as it's not updated when new instances come along so return nil watchCh return missingIdx.Value, nil } } - ch, res, err := catalogServiceMaxIndex(tx, serviceName, entMeta) + ch, res, err := catalogServiceMaxIndex(tx, serviceName, entMeta, peerName) if idx, ok := res.(*IndexEntry); ok && err == nil { return idx.Value, ch } - return catalogMaxIndex(tx, entMeta, checks), nil + return catalogMaxIndex(tx, entMeta, peerName, checks), nil } // Wrapper for maxIndexAndWatchChForService that operates on a list of ServiceNodes @@ -1192,7 +1301,7 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn, for i := 0; i < len(nodes); i++ { sn := structs.NewServiceName(nodes[i].ServiceName, &nodes[i].EnterpriseMeta) if ok := seen[sn]; !ok { - idx, svcCh := maxIndexAndWatchChForService(tx, sn.Name, true, watchChecks, &sn.EnterpriseMeta) + idx, svcCh := maxIndexAndWatchChForService(tx, sn.Name, true, watchChecks, &sn.EnterpriseMeta, nodes[i].PeerName) if idx > maxIdx { maxIdx = idx } @@ -1209,7 +1318,7 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn, // ConnectServiceNodes returns the nodes associated with a Connect // compatible destination for the given service name. This will include // both proxies and native integrations. 
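maxIndexAndWatchChForService above chooses the cheapest index that is still safe to block on: the extinction index when the service has no instances, the per-service index when one exists, and the catalog-wide maximum as a fallback. The decision reduces to roughly:

package main

import "fmt"

// serviceMaxIndex models the fallback chain in maxIndexAndWatchChForService
// with plain values in place of memdb index lookups.
func serviceMaxIndex(serviceExists, haveExtinctionIdx, haveServiceIdx bool, extinctionIdx, serviceIdx, catalogIdx uint64) uint64 {
	if !serviceExists && haveExtinctionIdx {
		// Service is absent: report the index of its last extinction.
		return extinctionIdx
	}
	if haveServiceIdx {
		return serviceIdx // fine-grained per-service index
	}
	return catalogIdx // coarse fallback: the whole catalog for this peer
}

func main() {
	fmt.Println(serviceMaxIndex(false, true, false, 4, 0, 9)) // 4
	fmt.Println(serviceMaxIndex(true, false, true, 0, 7, 9))  // 7
	fmt.Println(serviceMaxIndex(true, false, false, 0, 0, 9)) // 9
}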
-func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -1217,12 +1326,16 @@ func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMe if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + PeerName: peerName, + EnterpriseMeta: *entMeta, + } return serviceNodesTxn(tx, ws, indexConnect, q) } // ServiceNodes returns the nodes associated with a given service name. -func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -1230,7 +1343,11 @@ func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + PeerName: peerName, + EnterpriseMeta: *entMeta, + } return serviceNodesTxn(tx, ws, indexService, q) } @@ -1253,9 +1370,9 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // We append rather than replace since it allows users to migrate a service // to the mesh with a mix of sidecars and gateways until all its instances have a sidecar. var idx uint64 - if connect { + if connect && q.PeerName == "" { // Look up gateway nodes associated with the service - gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, &q.EnterpriseMeta) + gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, &q.EnterpriseMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err) } @@ -1278,7 +1395,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint } // Fill in the node details. - results, err = parseServiceNodes(tx, ws, results, &q.EnterpriseMeta) + results, err = parseServiceNodes(tx, ws, results, &q.EnterpriseMeta, q.PeerName) if err != nil { return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } @@ -1286,7 +1403,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // Get the table index. // TODO (gateways) (freddy) Why do we always consider the main service index here? // This doesn't seem to make sense for Connect when there's more than 1 result - svcIdx := maxIndexForService(tx, serviceName, len(results) > 0, false, &q.EnterpriseMeta) + svcIdx := maxIndexForService(tx, serviceName, len(results) > 0, false, &q.EnterpriseMeta, q.PeerName) if idx < svcIdx { idx = svcIdx } @@ -1296,7 +1413,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // ServiceTagNodes returns the nodes associated with a given service, filtering // out services that don't contain the given tags. 
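In serviceNodesTxn above, terminating-gateway instances are merged into Connect answers only when connect && q.PeerName == "": a local gateway cannot answer for a service imported from another cluster. Roughly:

package main

import "fmt"

// connectAnswer sketches the merge in serviceNodesTxn: direct instances
// always count, and local terminating-gateway nodes are appended only for
// local (non-peered) Connect queries.
func connectAnswer(connect bool, peerName string, instances, gatewayNodes []string) []string {
	results := append([]string{}, instances...)
	if connect && peerName == "" {
		results = append(results, gatewayNodes...)
	}
	return results
}

func main() {
	fmt.Println(connectAnswer(true, "", []string{"api-sidecar"}, []string{"tgw-1"}))      // both
	fmt.Println(connectAnswer(true, "peer1", []string{"api-sidecar"}, []string{"tgw-1"})) // instances only
}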
-func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1305,8 +1422,11 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: service, EnterpriseMeta: *entMeta} - services, err := tx.Get(tableServices, indexService, q) + services, err := tx.Get(tableServices, indexService, Query{ + Value: service, + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1324,12 +1444,12 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string } // Fill in the node details. - results, err = parseServiceNodes(tx, ws, results, entMeta) + results, err = parseServiceNodes(tx, ws, results, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } // Get the table index. - idx := maxIndexForService(tx, service, serviceExists, false, entMeta) + idx := maxIndexForService(tx, service, serviceExists, false, entMeta, peerName) return idx, results, nil } @@ -1366,12 +1486,16 @@ func serviceTagsFilter(sn *structs.ServiceNode, tags []string) bool { // ServiceAddressNodes returns the nodes associated with a given service, filtering // out services that don't match the given serviceAddress -func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // List all the services. - services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + services, err := tx.Get(tableServices, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1394,7 +1518,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta * } // Fill in the node details. - results, err = parseServiceNodes(tx, ws, results, entMeta) + results, err = parseServiceNodes(tx, ws, results, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } @@ -1403,10 +1527,14 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta * // parseServiceNodes iterates over a services query and fills in the node details, // returning a ServiceNodes slice. -func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta) (structs.ServiceNodes, error) { +func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta, peerName string) (structs.ServiceNodes, error) { // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. 
- allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + allNodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -1424,6 +1552,7 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ Value: sn.Node, EnterpriseMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, }) if err != nil { return nil, fmt.Errorf("failed node lookup: %s", err) @@ -1448,15 +1577,15 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo // NodeService is used to retrieve a specific service associated with the given // node. -func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeService, error) { +func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := catalogServicesMaxIndex(tx, entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) // Query the service - service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta) + service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err) } @@ -1464,8 +1593,8 @@ func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.Ente return idx, service, nil } -func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (*structs.NodeService, error) { - sn, err := getServiceNodeTxn(tx, nodeName, serviceID, entMeta) +func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.NodeService, error) { + sn, err := getServiceNodeTxn(tx, nodeName, serviceID, entMeta, peerName) if err != nil { return nil, err } @@ -1475,7 +1604,7 @@ func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.Ente return nil, nil } -func getServiceNodeTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (*structs.ServiceNode, error) { +func getServiceNodeTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.ServiceNode, error) { // TODO: pass non-pointer type for ent meta if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -1486,29 +1615,32 @@ func getServiceNodeTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.Ente EnterpriseMeta: *entMeta, Node: nodeName, Service: serviceID, + PeerName: peerName, }) if err != nil { return nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err) } + if service != nil { return service.(*structs.ServiceNode), nil } + return nil, nil } // ServiceNode is used to retrieve a specific service by service ID and node ID or name. 
-func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ServiceNode, error) { +func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.ServiceNode, error) { var ( node *structs.Node err error ) if nodeID != "" { - _, node, err = s.GetNodeID(types.NodeID(nodeID), entMeta) + _, node, err = s.GetNodeID(types.NodeID(nodeID), entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("Failure looking up node by ID %s: %w", nodeID, err) } } else if nodeName != "" { - _, node, err = s.GetNode(nodeName, entMeta) + _, node, err = s.GetNode(nodeName, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("Failure looking up node by name %s: %w", nodeName, err) } @@ -1523,10 +1655,10 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent defer tx.Abort() // Get the table index. - idx := catalogServicesMaxIndex(tx, entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) // Query the service - service, err := getServiceNodeTxn(tx, node.Node, serviceID, entMeta) + service, err := getServiceNodeTxn(tx, node.Node, serviceID, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err) } @@ -1534,7 +1666,7 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent return idx, service, nil } -func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { +func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1544,10 +1676,14 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac } // Get the table index. - idx := catalogMaxIndex(tx, entMeta, false) + idx := catalogMaxIndex(tx, entMeta, peerName, false) // Query the node by node name - watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: nodeNameOrID, EnterpriseMeta: *entMeta}) + watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ + Value: nodeNameOrID, + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err) } @@ -1564,6 +1700,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac iter, err := tx.Get(tableNodes, indexUUID+"_prefix", Query{ Value: resizeNodeLookupKey(nodeNameOrID), EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { ws.Add(watchCh) @@ -1594,7 +1731,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac nodeName := node.Node // Read all of the services - services, err := catalogServiceListByNode(tx, nodeName, entMeta, allowWildcard) + services, err := catalogServiceListByNode(tx, nodeName, entMeta, peerName, allowWildcard) if err != nil { return true, 0, nil, nil, fmt.Errorf("failed querying services for node %q: %s", nodeName, err) } @@ -1604,8 +1741,8 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac } // NodeServices is used to query service registrations by node name or UUID. 
-func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeServices, error) { - done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, false) +func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServices, error) { + done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, peerName, false) if done || err != nil { return idx, nil, err } @@ -1628,8 +1765,8 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac } // NodeServiceList is used to query service registrations by node name or UUID. -func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeServiceList, error) { - done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, true) +func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServiceList, error) { + done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, peerName, true) if done || err != nil { return idx, nil, err } @@ -1655,12 +1792,12 @@ func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta } // DeleteService is used to delete a given service associated with a node. -func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) error { tx := s.db.WriteTxn(idx) defer tx.Abort() // Call the service deletion - if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta, peerName); err != nil { return err } @@ -1670,9 +1807,9 @@ func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *a // deleteServiceCASTxn is used to try doing a service delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given service, then the call is a noop, otherwise a normal delete is invoked. -func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (bool, error) { +func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (bool, error) { // Look up the service. - service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta) + service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta, peerName) if err != nil { return false, fmt.Errorf("service lookup failed: %s", err) } @@ -1688,7 +1825,7 @@ func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, ser } // Call the actual deletion if the above passed. - if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta, peerName); err != nil { return false, err } @@ -1697,13 +1834,19 @@ func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, ser } // deleteServiceTxn is the inner method called to remove a service // registration within an existing transaction.
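deleteServiceTxn below must decide, after removing an instance, whether the service still exists: if instances remain it bumps the per-service index, and otherwise it garbage-collects that index entry and bumps the extinction index that absent-service watchers block on. A sketch of the bookkeeping:

package main

import "fmt"

var (
	instances     = map[string]int{"api": 2} // live instance count per service
	serviceIndex  = map[string]uint64{"api": 5}
	extinctionIdx uint64
)

// deregisterInstance mirrors the index bookkeeping in deleteServiceTxn.
func deregisterInstance(idx uint64, service string) {
	instances[service]--
	if instances[service] > 0 {
		serviceIndex[service] = idx // remaining instances: bump the service index
		return
	}
	// Last instance gone: GC the per-service index entry and record the
	// extinction index that absent-service blocking queries use.
	delete(serviceIndex, service)
	delete(instances, service)
	extinctionIdx = idx
}

func main() {
	deregisterInstance(6, "api")
	fmt.Println(serviceIndex["api"], extinctionIdx) // 6 0
	deregisterInstance(7, "api")
	fmt.Println(serviceIndex["api"], extinctionIdx) // 0 7
}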
-func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) error { // TODO: pass non-pointer type for ent meta if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - service, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: *entMeta, Node: nodeName, Service: serviceID}) + service, err := tx.First(tableServices, indexID, + NodeServiceQuery{ + EnterpriseMeta: *entMeta, + Node: nodeName, + Service: serviceID, + PeerName: peerName, + }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -1721,6 +1864,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st Node: nodeName, Service: serviceID, EnterpriseMeta: *entMeta, + PeerName: peerName, } checks, err := tx.Get(tableChecks, indexNodeService, nsq) if err != nil { @@ -1733,13 +1877,13 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st // Do the delete in a separate loop so we don't trash the iterator. for _, check := range deleteChecks { - if err := s.deleteCheckTxn(tx, idx, nodeName, check.CheckID, &check.EnterpriseMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, nodeName, check.CheckID, &check.EnterpriseMeta, check.PeerName); err != nil { return err } } // Update the index. - if err := catalogUpdateCheckIndexes(tx, idx, entMeta); err != nil { + if err := catalogUpdateCheckIndexes(tx, idx, entMeta, peerName); err != nil { return err } @@ -1747,30 +1891,35 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st if err := tx.Delete(tableServices, service); err != nil { return fmt.Errorf("failed deleting service: %s", err) } - if err := catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } svc := service.(*structs.ServiceNode) - name := svc.CompoundServiceName() - - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServicesIndexes(tx, idx, entMeta, svc.PeerName); err != nil { return err } + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { + return err + } + + name := svc.CompoundServiceName() + if err := cleanupMeshTopology(tx, idx, svc); err != nil { return fmt.Errorf("failed to clean up mesh-topology associations for %q: %v", name.String(), err) } - q := Query{Value: svc.ServiceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: svc.ServiceName, + EnterpriseMeta: *entMeta, + PeerName: svc.PeerName, + } if remainingService, err := tx.First(tableServices, indexService, q); err == nil { if remainingService != nil { // We have at least one remaining service, update the index - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, entMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, entMeta, svc.PeerName); err != nil { return err } } else { // There are no more service instances, cleanup the service index - _, serviceIndex, err := catalogServiceMaxIndex(tx, svc.ServiceName, entMeta) + _, serviceIndex, err := catalogServiceMaxIndex(tx, svc.ServiceName, entMeta, svc.PeerName) if err == nil && serviceIndex != nil { // we found the service
index, garbage collect it if errW := tx.Delete(tableIndex, serviceIndex); errW != nil { @@ -1778,11 +1927,13 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st } } - if err := catalogUpdateServiceExtinctionIndex(tx, idx, entMeta); err != nil { + if err := catalogUpdateServiceExtinctionIndex(tx, idx, entMeta, svc.PeerName); err != nil { return err } - if err := cleanupGatewayWildcards(tx, idx, svc); err != nil { - return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err) + if svc.PeerName == "" { + if err := cleanupGatewayWildcards(tx, idx, svc); err != nil { + return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err) + } } if err := freeServiceVirtualIP(tx, svc.ServiceName, nil, entMeta); err != nil { return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err) @@ -1867,20 +2018,24 @@ func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error { } // updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node -func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMeta *acl.EnterpriseMeta) error { +func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMeta *acl.EnterpriseMeta, peerName string) error { + if peerName == "" { + peerName = structs.LocalPeerKeyword + } services, err := tx.Get(tableServices, indexNode, Query{ Value: nodeID, EnterpriseMeta: *entMeta.WithWildcardNamespace(), + PeerName: peerName, }) if err != nil { return fmt.Errorf("failed updating services for node %s: %s", nodeID, err) } for service := services.Next(); service != nil; service = services.Next() { svc := service.(*structs.ServiceNode) - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } @@ -1891,7 +2046,7 @@ func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMe // Returns a bool indicating if a write happened and any error. func (s *Store) ensureCheckCASTxn(tx WriteTxn, idx uint64, hc *structs.HealthCheck) (bool, error) { // Retrieve the existing entry. 
- _, existing, err := getNodeCheckTxn(tx, hc.Node, hc.CheckID, &hc.EnterpriseMeta) + _, existing, err := getNodeCheckTxn(tx, hc.Node, hc.CheckID, &hc.EnterpriseMeta, hc.PeerName) if err != nil { return false, fmt.Errorf("failed health check lookup: %s", err) } @@ -1925,6 +2080,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc EnterpriseMeta: hc.EnterpriseMeta, Node: hc.Node, CheckID: string(hc.CheckID), + PeerName: hc.PeerName, }) if err != nil { return fmt.Errorf("failed health check lookup: %s", err) @@ -1948,6 +2104,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc node, err := tx.First(tableNodes, indexID, Query{ Value: hc.Node, EnterpriseMeta: hc.EnterpriseMeta, + PeerName: hc.PeerName, }) if err != nil { return fmt.Errorf("failed node lookup: %s", err) @@ -1964,6 +2121,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc EnterpriseMeta: hc.EnterpriseMeta, Node: hc.Node, Service: hc.ServiceID, + PeerName: hc.PeerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -1979,10 +2137,10 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc if existing != nil && existing.(*structs.HealthCheck).IsSame(hc) { modified = false } else { - if err = catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + if err = catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } @@ -1992,7 +2150,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc } else { // Since the check has been modified, it impacts all services of node // Update the status for all the services associated with this node - err = updateAllServiceIndexesOfNode(tx, idx, hc.Node, &hc.EnterpriseMeta) + err = updateAllServiceIndexesOfNode(tx, idx, hc.Node, &hc.EnterpriseMeta, hc.PeerName) if err != nil { return err } @@ -2000,7 +2158,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc } // Delete any sessions for this check if the health is critical. - if hc.Status == api.HealthCritical { + if hc.Status == api.HealthCritical && hc.PeerName == "" { sessions, err := checkSessionsTxn(tx, hc) if err != nil { return err @@ -2026,18 +2184,18 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc // NodeCheck is used to retrieve a specific check associated with the given // node. -func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { +func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.HealthCheck, error) { tx := s.db.Txn(false) defer tx.Abort() - return getNodeCheckTxn(tx, nodeName, checkID, entMeta) + return getNodeCheckTxn(tx, nodeName, checkID, entMeta, peerName) } // nodeCheckTxn is used as the inner method to handle reading a health check // from the state store. 
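The session-invalidation branch of ensureCheckTxn above is now scoped to the local cluster: a check transitioning to critical still tears down its sessions, but only when hc.PeerName is empty, since sessions are never imported from peers. In outline:

package main

import "fmt"

const healthCritical = "critical" // mirrors api.HealthCritical

// onCheckUpdate sketches the session-invalidation branch of ensureCheckTxn.
func onCheckUpdate(status, peerName string, sessions map[string][]string, checkID string) {
	if status == healthCritical && peerName == "" {
		// Sessions only exist locally, so peered checks never reach here.
		for _, id := range sessions[checkID] {
			fmt.Println("deleting session", id)
		}
		delete(sessions, checkID)
	}
}

func main() {
	sessions := map[string][]string{"serfHealth": {"s-1", "s-2"}}
	onCheckUpdate(healthCritical, "", sessions, "serfHealth")      // deletes s-1, s-2
	onCheckUpdate(healthCritical, "peer1", sessions, "serfHealth") // no-op
}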
-func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { +func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.HealthCheck, error) { // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) // TODO: accept non-pointer value if entMeta == nil { @@ -2045,7 +2203,13 @@ func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta } // Return the check. - check, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *entMeta, Node: nodeName, CheckID: string(checkID)}) + check, err := tx.First(tableChecks, indexID, + NodeCheckQuery{ + EnterpriseMeta: *entMeta, + Node: nodeName, + CheckID: string(checkID), + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -2058,7 +2222,7 @@ func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta // NodeChecks is used to retrieve checks associated with the // given node from the state store. -func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2067,10 +2231,14 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.Ente } // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) // Return the checks. - iter, err := catalogListChecksByNode(tx, Query{Value: nodeName, EnterpriseMeta: *entMeta}) + iter, err := catalogListChecksByNode(tx, Query{ + Value: nodeName, + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -2086,17 +2254,21 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.Ente // ServiceChecks is used to get all checks associated with a // given service ID. The query is performed against a service // _name_ instead of a service ID. -func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + EnterpriseMeta: *entMeta, + PeerName: peerName, + } iter, err := tx.Get(tableChecks, indexService, q) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) @@ -2113,35 +2285,37 @@ func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *ac // ServiceChecksByNodeMeta is used to get all checks associated with a // given service ID, filtered by the given node metadata values. The query // is performed against a service _name_ instead of a service ID. 
-func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, - filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { - +func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexForService(tx, serviceName, true, true, entMeta) + idx := maxIndexForService(tx, serviceName, true, true, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + EnterpriseMeta: *entMeta, + PeerName: peerName, + } iter, err := tx.Get(tableChecks, indexService, q) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } ws.Add(iter.WatchCh()) - return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta) + return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta, peerName) } // ChecksInState is used to query the state store for all checks // which are in the provided state. -func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() - idx, iter, err := checksInStateTxn(tx, ws, state, entMeta) + idx, iter, err := checksInStateTxn(tx, ws, state, entMeta, peerName) if err != nil { return 0, nil, err } @@ -2155,21 +2329,21 @@ func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.Ente // ChecksInStateByNodeMeta is used to query the state store for all checks // which are in the provided state, filtered by the given node metadata values. -func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() - idx, iter, err := checksInStateTxn(tx, ws, state, entMeta) + idx, iter, err := checksInStateTxn(tx, ws, state, entMeta, peerName) if err != nil { return 0, nil, err } - return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta) + return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta, peerName) } -func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta) (uint64, memdb.ResultIterator, error) { +func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, memdb.ResultIterator, error) { // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -2179,9 +2353,17 @@ func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl. 
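// Hypothetical usage of the node-meta variants above: all critical checks
// on nodes tagged rack=r1 in the local cluster ("rack" is an invented
// node metadata key; ws is a memdb.WatchSet for blocking queries).
idx, checks, err := s.ChecksInStateByNodeMeta(
    ws,
    api.HealthCritical,
    map[string]string{"rack": "r1"},
    structs.DefaultEnterpriseMetaInDefaultPartition(),
    "", // local peer
)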
var iter memdb.ResultIterator var err error if state == api.HealthAny { - iter, err = tx.Get(tableChecks, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + iter, err = tx.Get(tableChecks, indexID+"_prefix", q) } else { - q := Query{Value: state, EnterpriseMeta: *entMeta} + q := Query{ + Value: state, + EnterpriseMeta: *entMeta, + PeerName: peerName, + } iter, err = tx.Get(tableChecks, indexStatus, q) } if err != nil { @@ -2194,13 +2376,26 @@ func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl. // parseChecksByNodeMeta is a helper function used to deduplicate some // repetitive code for returning health checks filtered by node metadata fields. -func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, - idx uint64, iter memdb.ResultIterator, filters map[string]string, - entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func parseChecksByNodeMeta( + tx ReadTxn, + ws memdb.WatchSet, + idx uint64, + iter memdb.ResultIterator, + filters map[string]string, + entMeta *acl.EnterpriseMeta, + peerName string, +) (uint64, structs.HealthChecks, error) { + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. - allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + allNodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -2213,6 +2408,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, watchCh, node, err := tx.FirstWatch(tableNodes, indexID, Query{ Value: healthCheck.Node, EnterpriseMeta: healthCheck.EnterpriseMeta, + PeerName: healthCheck.PeerName, }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) @@ -2232,12 +2428,12 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, } // DeleteCheck is used to delete a health check registration. -func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) error { +func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) error { tx := s.db.WriteTxn(idx) defer tx.Abort() // Call the check deletion - if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta, peerName); err != nil { return err } @@ -2247,9 +2443,16 @@ func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entM // deleteCheckCASTxn is used to try doing a check delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. -func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (bool, error) { +func (s *Store) deleteCheckCASTxn( + tx WriteTxn, + idx, cidx uint64, + node string, + checkID types.CheckID, + entMeta *acl.EnterpriseMeta, + peerName string, +) (bool, error) { // Try to retrieve the existing health check. 
- _, hc, err := getNodeCheckTxn(tx, node, checkID, entMeta) + _, hc, err := getNodeCheckTxn(tx, node, checkID, entMeta, peerName) if err != nil { return false, fmt.Errorf("check lookup failed: %s", err) } @@ -2265,7 +2468,7 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch } // Call the actual deletion if the above passed. - if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta, peerName); err != nil { return false, err } @@ -2274,11 +2477,16 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch // NodeServiceQuery is a type used to query the checks table. type NodeServiceQuery struct { - Node string - Service string + Node string + Service string + PeerName string acl.EnterpriseMeta } +func (q NodeServiceQuery) PeerOrEmpty() string { + return q.PeerName +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. func (q NodeServiceQuery) NamespaceOrDefault() string { @@ -2293,13 +2501,19 @@ func (q NodeServiceQuery) PartitionOrDefault() string { // deleteCheckTxn is the inner method used to call a health // check deletion within an existing transaction. -func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) error { +func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) error { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } // Try to retrieve the existing health check. - hc, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *entMeta, Node: node, CheckID: string(checkID)}) + hc, err := tx.First(tableChecks, indexID, + NodeCheckQuery{ + EnterpriseMeta: *entMeta, + Node: node, + CheckID: string(checkID), + PeerName: peerName, + }) if err != nil { return fmt.Errorf("check lookup failed: %s", err) } @@ -2310,24 +2524,29 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ if existing != nil { // When no service is linked to this service, update all services of node if existing.ServiceID != "" { - if err := catalogUpdateServiceIndexes(tx, existing.ServiceName, idx, &existing.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, existing.ServiceName, &existing.EnterpriseMeta, existing.PeerName); err != nil { return err } - - svcRaw, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: existing.EnterpriseMeta, Node: existing.Node, Service: existing.ServiceID}) + svcRaw, err := tx.First(tableServices, indexID, + NodeServiceQuery{ + EnterpriseMeta: existing.EnterpriseMeta, + Node: existing.Node, + Service: existing.ServiceID, + PeerName: existing.PeerName, + }) if err != nil { return fmt.Errorf("failed retrieving service from state store: %v", err) } svc := svcRaw.(*structs.ServiceNode) - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } else { - if err := updateAllServiceIndexesOfNode(tx, idx, existing.Node, &existing.EnterpriseMeta); err != nil { + if err := updateAllServiceIndexesOfNode(tx, idx, existing.Node, &existing.EnterpriseMeta, existing.PeerName); err != nil { return fmt.Errorf("Failed to update services linked 
to deleted healthcheck: %s", err) } - if err := catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil { + if err := catalogUpdateServicesIndexes(tx, idx, entMeta, existing.PeerName); err != nil { return err } } @@ -2338,20 +2557,22 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ return fmt.Errorf("failed removing check: %s", err) } - if err := catalogUpdateCheckIndexes(tx, idx, entMeta); err != nil { + if err := catalogUpdateCheckIndexes(tx, idx, entMeta, peerName); err != nil { return err } - // Delete any sessions for this check. - sessions, err := checkSessionsTxn(tx, existing) - if err != nil { - return err - } + if peerName == "" { + // Delete any sessions for this check. + sessions, err := checkSessionsTxn(tx, existing) + if err != nil { + return err + } - // Do the delete in a separate loop so we don't trash the iterator. - for _, sess := range sessions { - if err := s.deleteSessionTxn(tx, idx, sess.Session, &sess.EnterpriseMeta); err != nil { - return fmt.Errorf("failed deleting session: %s", err) + // Do the delete in a separate loop so we don't trash the iterator. + for _, sess := range sessions { + if err := s.deleteSessionTxn(tx, idx, sess.Session, &sess.EnterpriseMeta); err != nil { + return fmt.Errorf("failed deleting session: %s", err) + } } } @@ -2359,12 +2580,12 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ } // CombinedCheckServiceNodes is used to query all nodes and checks for both typical and Connect endpoints of a service -func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.ServiceName) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.ServiceName, peerName string) (uint64, structs.CheckServiceNodes, error) { var ( resp structs.CheckServiceNodes maxIdx uint64 ) - idx, csn, err := s.CheckServiceNodes(ws, service.Name, &service.EnterpriseMeta) + idx, csn, err := s.CheckServiceNodes(ws, service.Name, &service.EnterpriseMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed to get downstream nodes for %q: %v", service, err) } @@ -2372,8 +2593,7 @@ func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.Ser maxIdx = idx } resp = append(resp, csn...) - - idx, csn, err = s.CheckConnectServiceNodes(ws, service.Name, &service.EnterpriseMeta) + idx, csn, err = s.CheckConnectServiceNodes(ws, service.Name, &service.EnterpriseMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed to get downstream connect nodes for %q: %v", service, err) } @@ -2386,14 +2606,14 @@ func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.Ser } // CheckServiceNodes is used to query all nodes and checks for a given service. -func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { - return s.checkServiceNodes(ws, serviceName, false, entMeta) +func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, false, entMeta, peerName) } // CheckConnectServiceNodes is used to query all nodes and checks for Connect // compatible endpoints for a given service. 
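// CombinedCheckServiceNodes above appends Connect results to the typical
// ones rather than replacing them. A hypothetical caller reading a
// service exported by a peer ("dc2-peer" is invented for illustration):
sn := structs.NewServiceName("api", structs.DefaultEnterpriseMetaInDefaultPartition())
idx, nodes, err := s.CombinedCheckServiceNodes(ws, sn, "dc2-peer")
if err != nil {
    return err
}
// nodes now holds both typical and Connect instances of "api" imported
// from dc2-peer, each with its check status attached.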
-func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { - return s.checkServiceNodes(ws, serviceName, true, entMeta) +func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, true, entMeta, peerName) } // CheckIngressServiceNodes is used to query all nodes and checks for ingress @@ -2402,7 +2622,7 @@ func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, tx := s.db.Txn(false) defer tx.Abort() - maxIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindIngressGateway, entMeta) + maxIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindIngressGateway, entMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err) } @@ -2424,7 +2644,7 @@ func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, var results structs.CheckServiceNodes for sn := range names { - idx, n, err := checkServiceNodesTxn(tx, ws, sn.Name, false, &sn.EnterpriseMeta) + idx, n, err := checkServiceNodesTxn(tx, ws, sn.Name, false, &sn.EnterpriseMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, err } @@ -2434,14 +2654,14 @@ func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, return maxIdx, results, nil } -func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() - return checkServiceNodesTxn(tx, ws, serviceName, connect, entMeta) + return checkServiceNodesTxn(tx, ws, serviceName, connect, entMeta, peerName) } -func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { index := indexService if connect { index = indexConnect @@ -2455,6 +2675,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con q := Query{ Value: serviceName, EnterpriseMeta: *entMeta, + PeerName: peerName, } iter, err := tx.Get(tableServices, index, q) if err != nil { @@ -2489,9 +2710,10 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // We append rather than replace since it allows users to migrate a service // to the mesh with a mix of sidecars and gateways until all its instances have a sidecar. 
var idx uint64 - if connect { + if connect && peerName == "" { // Look up gateway nodes associated with the service - gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta) + // TODO(peering): we'll have to do something here + gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err) } @@ -2523,7 +2745,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // We know service values should exist since the serviceNames map is only // populated if there is at least one result above. so serviceExists arg // below is always true. - svcIdx, svcCh := maxIndexAndWatchChForService(tx, n.Name, true, true, &n.EnterpriseMeta) + svcIdx, svcCh := maxIndexAndWatchChForService(tx, n.Name, true, true, &n.EnterpriseMeta, peerName) // Take the max index represented idx = lib.MaxUint64(idx, svcIdx) if svcCh != nil { @@ -2544,7 +2766,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // use target serviceName here but it actually doesn't matter. No chan will // be returned as we can't use the optimization in this case (and don't need // to as there is only one chan to watch anyway). - svcIdx, _ := maxIndexAndWatchChForService(tx, serviceName, false, true, entMeta) + svcIdx, _ := maxIndexAndWatchChForService(tx, serviceName, false, true, entMeta, peerName) idx = lib.MaxUint64(idx, svcIdx) } @@ -2570,12 +2792,12 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con ws.Add(iter.WatchCh()) } - return parseCheckServiceNodes(tx, fallbackWS, idx, results, entMeta, err) + return parseCheckServiceNodes(tx, fallbackWS, idx, results, entMeta, peerName, err) } // CheckServiceTagNodes is used to query all nodes and checks for a given // service, filtering out services that don't contain the given tag. -func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2584,7 +2806,7 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{Value: serviceName, EnterpriseMeta: *entMeta, PeerName: peerName} iter, err := tx.Get(tableServices, indexService, q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) @@ -2603,8 +2825,8 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags } // Get the table index. 
- idx := maxIndexForService(tx, serviceName, serviceExists, true, entMeta) - return parseCheckServiceNodes(tx, ws, idx, results, entMeta, err) + idx := maxIndexForService(tx, serviceName, serviceExists, true, entMeta, peerName) + return parseCheckServiceNodes(tx, ws, idx, results, entMeta, peerName, err) } // GatewayServices is used to query all services associated with a gateway @@ -2650,23 +2872,37 @@ func (s *Store) ServiceNamesOfKind(ws memdb.WatchSet, kind structs.ServiceKind) tx := s.db.Txn(false) defer tx.Abort() - return serviceNamesOfKindTxn(tx, ws, kind) + wildcardMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + return serviceNamesOfKindTxn(tx, ws, kind, *wildcardMeta) } -func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { - var names []*KindServiceName - iter, err := tx.Get(tableKindServiceNames, indexKindOnly, kind) +func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) (uint64, []*KindServiceName, error) { + iter, err := tx.Get(tableKindServiceNames, indexKind, Query{Value: string(kind), EnterpriseMeta: entMeta}) if err != nil { return 0, nil, err } + + // TODO(peering): Maybe delete this watch and rely on the max idx tables below, to avoid waking up on unrelated changes ws.Add(iter.WatchCh()) - idx := kindServiceNamesMaxIndex(tx, ws, kind) + var names []*KindServiceName for name := iter.Next(); name != nil; name = iter.Next() { ksn := name.(*KindServiceName) names = append(names, ksn) } + var idx uint64 + switch { + case entMeta.PartitionOrDefault() == structs.WildcardSpecifier: + idx = kindServiceNamesMaxIndex(tx, ws, kind.Normalized()) + + case entMeta.NamespaceOrDefault() == structs.WildcardSpecifier: + idx = kindServiceNamesMaxIndex(tx, ws, partitionedIndexEntryName(kind.Normalized(), entMeta.PartitionOrDefault())) + + default: + idx = kindServiceNamesMaxIndex(tx, ws, partitionedAndNamespacedIndexEntryName(kind.Normalized(), &entMeta)) + + } return idx, names, nil } @@ -2681,7 +2917,9 @@ func parseCheckServiceNodes( tx ReadTxn, ws memdb.WatchSet, idx uint64, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta, - err error) (uint64, structs.CheckServiceNodes, error) { + peerName string, + err error, +) (uint64, structs.CheckServiceNodes, error) { if err != nil { return 0, nil, err } @@ -2692,9 +2930,16 @@ func parseCheckServiceNodes( return idx, nil, nil } + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. - allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + allNodes, err := tx.Get(tableNodes, indexID+"_prefix", Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -2703,7 +2948,10 @@ func parseCheckServiceNodes( // We need a similar fallback for checks. Since services need the // status of node + service-specific checks, we pull in a top-level // watch over all checks. 
- allChecks, err := tx.Get(tableChecks, indexID+"_prefix", entMeta) + allChecks, err := tx.Get(tableChecks, indexID+"_prefix", Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed checks lookup: %s", err) } @@ -2715,6 +2963,7 @@ func parseCheckServiceNodes( watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ Value: sn.Node, EnterpriseMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) @@ -2733,6 +2982,7 @@ func parseCheckServiceNodes( Node: sn.Node, Service: "", // node checks have no service EnterpriseMeta: *sn.EnterpriseMeta.WithWildcardNamespace(), + PeerName: sn.PeerName, } iter, err := tx.Get(tableChecks, indexNodeService, q) if err != nil { @@ -2748,6 +2998,7 @@ func parseCheckServiceNodes( Node: sn.Node, Service: sn.ServiceID, EnterpriseMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, } iter, err = tx.Get(tableChecks, indexNodeService, q) if err != nil { @@ -2771,7 +3022,7 @@ func parseCheckServiceNodes( // NodeInfo is used to generate a dump of a single node. The dump includes // all services and checks which are registered against the node. -func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { +func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2780,55 +3031,72 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.Enterprise } // Get the table index. - idx := catalogMaxIndex(tx, entMeta, true) + idx := catalogMaxIndex(tx, entMeta, peerName, true) // Query the node by the passed node nodes, err := tx.Get(tableNodes, indexID, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } ws.Add(nodes.WatchCh()) - return parseNodes(tx, ws, idx, nodes, entMeta) + return parseNodes(tx, ws, idx, nodes, entMeta, peerName) } // NodeDump is used to generate a dump of all nodes. This call is expensive // as it has to query every node, service, and check. The response can also // be quite large since there is currently no filtering applied. -func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { +func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() + if entMeta == nil { + entMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + // Get the table index. 
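// Hypothetical read of a single imported node via the peered NodeInfo
// signature above; the dump includes the node's services and checks.
idx, dump, err := s.NodeInfo(
    ws,
    "node1",
    structs.DefaultEnterpriseMetaInDefaultPartition(),
    "dc2-peer", // invented peer name; "" would dump the local node
)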
- idx := catalogMaxIndex(tx, entMeta, true) + idx := catalogMaxIndex(tx, entMeta, peerName, true) // Fetch all of the registered nodes - nodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + nodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } ws.Add(nodes.WatchCh()) - return parseNodes(tx, ws, idx, nodes, entMeta) + return parseNodes(tx, ws, idx, nodes, entMeta, peerName) } -func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() if useKind { - return serviceDumpKindTxn(tx, ws, kind, entMeta) + return serviceDumpKindTxn(tx, ws, kind, entMeta, peerName) } else { - return serviceDumpAllTxn(tx, ws, entMeta) + return serviceDumpAllTxn(tx, ws, entMeta, peerName) } } -func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { // Get the table index - idx := catalogMaxIndexWatch(tx, ws, entMeta, true) + idx := catalogMaxIndexWatch(tx, ws, entMeta, "", true) - services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + services, err := tx.Get(tableServices, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -2839,19 +3107,23 @@ func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMet results = append(results, sn) } - return parseCheckServiceNodes(tx, nil, idx, results, entMeta, err) + return parseCheckServiceNodes(tx, nil, idx, results, entMeta, peerName, err) } -func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { // unlike when we are dumping all services here we only need to watch the kind specific index entry for changing (or nodes, checks) // updating any services, nodes or checks will bump the appropriate service kind index so there is no need to watch any of the individual // entries - idx := catalogServiceKindMaxIndex(tx, ws, kind, entMeta) + idx := catalogServiceKindMaxIndex(tx, ws, kind, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: string(kind), EnterpriseMeta: *entMeta} + q := Query{ + Value: string(kind), + EnterpriseMeta: *entMeta, + PeerName: peerName, + } services, err := tx.Get(tableServices, indexKind, q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) @@ -2863,14 +3135,15 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, results = append(results, sn) } - return parseCheckServiceNodes(tx, nil, idx, results, entMeta, err) + return parseCheckServiceNodes(tx, 
nil, idx, results, entMeta, peerName, err) } // parseNodes takes an iterator over a set of nodes and returns a struct // containing the nodes along with all of their associated services // and/or health checks. +// TODO(peering): support parsing by peerName func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, - iter memdb.ResultIterator, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { + iter memdb.ResultIterator, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.NodeDump, error) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -2878,7 +3151,11 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, // We don't want to track an unlimited number of services, so we pull a // top-level watch to use as a fallback. - allServices, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + allServices, err := tx.Get(tableServices, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -2900,13 +3177,14 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, ID: node.ID, Node: node.Node, Partition: node.Partition, + PeerName: node.PeerName, Address: node.Address, TaggedAddresses: node.TaggedAddresses, Meta: node.Meta, } // Query the node services - services, err := catalogServiceListByNode(tx, node.Node, entMeta, true) + services, err := catalogServiceListByNode(tx, node.Node, entMeta, node.PeerName, true) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -2917,7 +3195,11 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, } // Query the service level checks - checks, err := catalogListChecksByNode(tx, Query{Value: node.Node, EnterpriseMeta: *entMeta}) + checks, err := catalogListChecksByNode(tx, Query{ + Value: node.Node, + EnterpriseMeta: *entMeta, + PeerName: node.PeerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } @@ -2935,6 +3217,7 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, // checkSessionsTxn returns the IDs of all sessions associated with a health check func checkSessionsTxn(tx ReadTxn, hc *structs.HealthCheck) ([]*sessionCheck, error) { + // TODO(peering): what are implications for imported health checks? mappings, err := tx.Get(tableSessionChecks, indexNodeCheck, MultiQuery{Value: []string{hc.Node, string(hc.CheckID)}, EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(hc.PartitionOrDefault())}) if err != nil { @@ -3392,7 +3675,11 @@ func (s *Store) collectGatewayServices(tx ReadTxn, ws memdb.WatchSet, iter memdb // TODO(ingress): How to handle index rolling back when a config entry is // deleted that references a service? // We might need something like the service_last_extinction index? 
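// Hypothetical usage of the peered ServiceDump above: useKind=true routes
// to serviceDumpKindTxn, so only the kind-specific index is watched.
idx, csns, err := s.ServiceDump(
    ws,
    structs.ServiceKindConnectProxy,
    true, // useKind: dump only this kind
    structs.DefaultEnterpriseMetaInDefaultPartition(),
    "dc2-peer", // invented peer name
)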
-func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { + if peerName != "" { + return 0, nil, nil + } + // Look up gateway name associated with the service gws, err := tx.Get(tableGatewayServices, indexService, structs.NewServiceName(service, entMeta)) if err != nil { @@ -3434,7 +3721,7 @@ func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind str } // This prevents the index from sliding back in case all instances of the gateway service are deregistered - svcIdx := maxIndexForService(tx, mapping.Gateway.Name, exists, false, &mapping.Gateway.EnterpriseMeta) + svcIdx := maxIndexForService(tx, mapping.Gateway.Name, exists, false, &mapping.Gateway.EnterpriseMeta, structs.DefaultPeerKeyword) maxIdx = lib.MaxUint64(maxIdx, svcIdx) // Ensure that blocking queries wake up if the gateway-service mapping exists, but the gateway does not exist yet @@ -3530,7 +3817,11 @@ func (s *Store) ServiceTopology( if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: service, EnterpriseMeta: *entMeta} + q := Query{ + Value: service, + EnterpriseMeta: *entMeta, + PeerName: structs.TODOPeerKeyword, + } idx, proxies, err := serviceNodesTxn(tx, ws, indexConnect, q) if err != nil { @@ -3645,7 +3936,7 @@ func (s *Store) ServiceTopology( upstreamDecisions[un.String()] = decision } - idx, unfilteredUpstreams, err := s.combinedServiceNodesTxn(tx, ws, upstreamNames) + idx, unfilteredUpstreams, err := s.combinedServiceNodesTxn(tx, ws, upstreamNames, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed to get upstreams for %q: %v", sn.String(), err) } @@ -3763,7 +4054,7 @@ func (s *Store) ServiceTopology( downstreamDecisions[dn.String()] = decision } - idx, unfilteredDownstreams, err := s.combinedServiceNodesTxn(tx, ws, downstreamNames) + idx, unfilteredDownstreams, err := s.combinedServiceNodesTxn(tx, ws, downstreamNames, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed to get downstreams for %q: %v", sn.String(), err) @@ -3812,14 +4103,14 @@ func (s *Store) ServiceTopology( // combinedServiceNodesTxn returns typical and connect endpoints for a list of services. // This enables aggregating check statuses across both. -func (s *Store) combinedServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, names []structs.ServiceName) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) combinedServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, names []structs.ServiceName, peerName string) (uint64, structs.CheckServiceNodes, error) { var ( maxIdx uint64 resp structs.CheckServiceNodes ) for _, u := range names { // Collect typical then connect instances - idx, csn, err := checkServiceNodesTxn(tx, ws, u.Name, false, &u.EnterpriseMeta) + idx, csn, err := checkServiceNodesTxn(tx, ws, u.Name, false, &u.EnterpriseMeta, peerName) if err != nil { return 0, nil, err } @@ -3828,7 +4119,7 @@ func (s *Store) combinedServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, names []s resp = append(resp, csn...)
- idx, csn, err = checkServiceNodesTxn(tx, ws, u.Name, true, &u.EnterpriseMeta) + idx, csn, err = checkServiceNodesTxn(tx, ws, u.Name, true, &u.EnterpriseMeta, peerName) if err != nil { return 0, nil, err } @@ -3929,6 +4220,7 @@ func linkedFromRegistrationTxn(tx ReadTxn, ws memdb.WatchSet, service structs.Se // updateMeshTopology creates associations between the input service and its upstreams in the topology table func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeService, existing interface{}) error { + // TODO(peering): make this peering aware oldUpstreams := make(map[structs.ServiceName]bool) if e, ok := existing.(*structs.ServiceNode); ok { for _, u := range e.ServiceProxy.Upstreams { @@ -4009,6 +4301,7 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS // cleanupMeshTopology removes a service from the mesh topology table // This is only safe to call when there are no more known instances of this proxy func cleanupMeshTopology(tx WriteTxn, idx uint64, service *structs.ServiceNode) error { + // TODO(peering): make this peering aware? if service.ServiceKind != structs.ServiceKindConnectProxy { return nil } @@ -4116,6 +4409,7 @@ func truncateGatewayServiceTopologyMappings(tx WriteTxn, idx uint64, gateway str } func upsertKindServiceName(tx WriteTxn, idx uint64, kind structs.ServiceKind, name structs.ServiceName) error { + // TODO(peering): make this peering aware q := KindServiceNameQuery{Name: name.Name, Kind: kind, EnterpriseMeta: name.EnterpriseMeta} existing, err := tx.First(tableKindServiceNames, indexID, q) if err != nil { @@ -4138,10 +4432,7 @@ func upsertKindServiceName(tx WriteTxn, idx uint64, kind structs.ServiceKind, na if err := tx.Insert(tableKindServiceNames, &ksn); err != nil { return fmt.Errorf("failed inserting %s/%s into %s: %s", kind, name.String(), tableKindServiceNames, err) } - if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { - return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) - } - return nil + return updateKindServiceNamesIndex(tx, idx, kind, name.EnterpriseMeta) } func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, kind structs.ServiceKind) error { @@ -4149,15 +4440,12 @@ func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, k if _, err := tx.DeleteAll(tableKindServiceNames, indexID, q); err != nil { return fmt.Errorf("failed to delete %s from %s: %s", name, tableKindServiceNames, err) } - - if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { - return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) - } - return nil + return updateKindServiceNamesIndex(tx, idx, kind, name.EnterpriseMeta) } // CatalogDump returns all the contents of the node, service and check tables. // In Enterprise, this will return entries across all partitions and namespaces. +// TODO(peering) make this peering aware? 
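// upsertKindServiceName and cleanupKindServiceName above now delegate to
// updateKindServiceNamesIndex, which is defined outside this hunk. A
// plausible shape, assuming it writes the same granularities that
// serviceNamesOfKindTxn reads back (kind-only plus partition/namespace
// scoped entries); a sketch, not the verbatim definition:
func updateKindServiceNamesIndex(tx WriteTxn, idx uint64, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) error {
    // Kind-only entry, consulted for wildcard-partition reads.
    if err := indexUpdateMaxTxn(tx, idx, kind.Normalized()); err != nil {
        return fmt.Errorf("failed updating kind index: %w", err)
    }
    // Partition+namespace scoped entry, consulted by the switch in
    // serviceNamesOfKindTxn for non-wildcard queries.
    if err := indexUpdateMaxTxn(tx, idx, partitionedAndNamespacedIndexEntryName(kind.Normalized(), &entMeta)); err != nil {
        return fmt.Errorf("failed updating partitioned+namespaced kind index: %w", err)
    }
    return nil
}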
func (s *Store) CatalogDump() (*structs.CatalogContents, error) { tx := s.db.Txn(false) contents := &structs.CatalogContents{} diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 13c5c4ba0..ba80a2c74 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" ) @@ -17,33 +18,13 @@ import ( type EventSubjectService struct { Key string EnterpriseMeta acl.EnterpriseMeta + PeerName string overrideKey string overrideNamespace string overridePartition string } -// String satisfies the stream.Subject interface. -func (s EventSubjectService) String() string { - partition := s.EnterpriseMeta.PartitionOrDefault() - if v := s.overridePartition; v != "" { - partition = strings.ToLower(v) - } - - namespace := s.EnterpriseMeta.NamespaceOrDefault() - if v := s.overrideNamespace; v != "" { - namespace = strings.ToLower(v) - } - - key := s.Key - if v := s.overrideKey; v != "" { - key = v - } - key = strings.ToLower(key) - - return partition + "/" + namespace + "/" + key -} - // EventPayloadCheckServiceNode is used as the Payload for a stream.Event to // indicate changes to a CheckServiceNode for service health. // @@ -62,6 +43,7 @@ type EventPayloadCheckServiceNode struct { } func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool { + // TODO(peering): figure out how authz works for peered data return e.Value.CanRead(authz) == acl.Allow } @@ -76,6 +58,31 @@ func (e EventPayloadCheckServiceNode) Subject() stream.Subject { } } +func (e EventPayloadCheckServiceNode) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_ServiceHealth{ + ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ + Op: e.Op, + CheckServiceNode: pbservice.NewCheckServiceNodeFromStructs(e.Value), + }, + }, + } +} + +func PBToStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.EnterpriseMeta) *stream.SubscribeRequest { + return &stream.SubscribeRequest{ + Topic: req.Topic, + Subject: EventSubjectService{ + Key: req.Key, + EnterpriseMeta: entMeta, + PeerName: req.PeerName, + }, + Token: req.Token, + Index: req.Index, + } +} + // serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot // of stream.Events that describe the current state of a service health query.
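// PBToStreamSubscribeRequest above is the seam where a peer name on the
// wire becomes part of the event subject. A hypothetical translation on
// the subscribe path ("dc2-peer" is invented; Topic_ServiceHealth is the
// existing service-health topic):
req := &pbsubscribe.SubscribeRequest{
    Topic:    pbsubscribe.Topic_ServiceHealth,
    Key:      "web",
    PeerName: "dc2-peer", // "" would subscribe to local data
}
streamReq := PBToStreamSubscribeRequest(req, *structs.DefaultEnterpriseMetaInDefaultPartition())
// Under the OSS String() added later in this diff, streamReq renders its
// subject as "dc2-peer/web"; with PeerName "" it collapses to "web".
_ = streamReq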
func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { @@ -89,7 +96,7 @@ func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.Sn return 0, fmt.Errorf("expected SubscribeRequest.Subject to be a: state.EventSubjectService, was a: %T", req.Subject) } - idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta) + idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta, subject.PeerName) if err != nil { return 0, err } @@ -127,6 +134,7 @@ type nodeServiceTuple struct { Node string ServiceID string EntMeta acl.EnterpriseMeta + PeerName string } func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple { @@ -134,6 +142,7 @@ func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTupl Node: strings.ToLower(sn.Node), ServiceID: sn.ServiceID, EntMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, } } @@ -142,6 +151,7 @@ func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServ Node: strings.ToLower(hc.Node), ServiceID: hc.ServiceID, EntMeta: hc.EnterpriseMeta, + PeerName: hc.PeerName, } } @@ -153,6 +163,7 @@ type serviceChange struct { type nodeTuple struct { Node string Partition string + PeerName string } var serviceChangeIndirect = serviceChange{changeType: changeIndirect} @@ -286,7 +297,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } // Rebuild events for all services on this node es, err := newServiceHealthEventsForNode(tx, changes.Index, node.Node, - structs.WildcardEnterpriseMetaInPartition(node.Partition)) + structs.WildcardEnterpriseMetaInPartition(node.Partition), node.PeerName) if err != nil { return nil, err } @@ -342,6 +353,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event q := Query{ Value: gs.Gateway.Name, EnterpriseMeta: gatewayName.EnterpriseMeta, + PeerName: structs.TODOPeerKeyword, } _, nodes, err := serviceNodesTxn(tx, nil, indexService, q) if err != nil { @@ -504,6 +516,8 @@ func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Ev case structs.ServiceKindTerminatingGateway: var result []stream.Event + // TODO(peering): handle terminating gateways somehow + sn := structs.ServiceName{ Name: node.Service.Service, EnterpriseMeta: node.Service.EnterpriseMeta, } @@ -551,16 +565,17 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod // given node. This mirrors some of the logic in the oddly-named // parseCheckServiceNodes but is more efficient since we know they are all on // the same node. -func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta) ([]stream.Event, error) { +func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta, peerName string) ([]stream.Event, error) { services, err := tx.Get(tableServices, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, err } - n, checksFunc, err := getNodeAndChecks(tx, node, entMeta) + n, checksFunc, err := getNodeAndChecks(tx, node, entMeta, peerName) if err != nil { return nil, err } @@ -578,11 +593,12 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta // getNodeAndChecks returns the node structure and a function that returns // the full list of checks for a specific service on that node.
-func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*structs.Node, serviceChecksFunc, error) { +func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, serviceChecksFunc, error) { // Fetch the node nodeRaw, err := tx.First(tableNodes, indexID, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, nil, err @@ -595,6 +611,7 @@ func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*st iter, err := tx.Get(tableChecks, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, nil, err @@ -629,7 +646,7 @@ func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*st type serviceChecksFunc func(serviceID string) structs.HealthChecks func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTuple) (stream.Event, error) { - n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta) + n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta, tuple.PeerName) if err != nil { return stream.Event{}, err } @@ -638,6 +655,7 @@ func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTu EnterpriseMeta: tuple.EntMeta, Node: tuple.Node, Service: tuple.ServiceID, + PeerName: tuple.PeerName, }) if err != nil { return stream.Event{}, err @@ -690,6 +708,7 @@ func newServiceHealthEventDeregister(idx uint64, sn *structs.ServiceNode) stream Node: &structs.Node{ Node: sn.Node, Partition: entMeta.PartitionOrEmpty(), + PeerName: sn.PeerName, }, Service: sn.ToNodeService(), } diff --git a/agent/consul/state/catalog_events_oss.go b/agent/consul/state/catalog_events_oss.go index d088a6cfd..e59636d31 100644 --- a/agent/consul/state/catalog_events_oss.go +++ b/agent/consul/state/catalog_events_oss.go @@ -13,6 +13,7 @@ func (nst nodeServiceTuple) nodeTuple() nodeTuple { return nodeTuple{ Node: strings.ToLower(nst.Node), Partition: "", + PeerName: nst.PeerName, } } @@ -20,6 +21,7 @@ func newNodeTupleFromNode(node *structs.Node) nodeTuple { return nodeTuple{ Node: strings.ToLower(node.Node), Partition: "", + PeerName: node.PeerName, } } @@ -27,5 +29,20 @@ func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple { return nodeTuple{ Node: strings.ToLower(hc.Node), Partition: "", + PeerName: hc.PeerName, } } + +// String satisfies the stream.Subject interface. 
+func (s EventSubjectService) String() string { + key := s.Key + if v := s.overrideKey; v != "" { + key = v + } + key = strings.ToLower(key) + + if s.PeerName == "" { + return key + } + return s.PeerName + "/" + key +} diff --git a/agent/consul/state/catalog_events_oss_test.go b/agent/consul/state/catalog_events_oss_test.go new file mode 100644 index 000000000..ace7cfe71 --- /dev/null +++ b/agent/consul/state/catalog_events_oss_test.go @@ -0,0 +1,45 @@ +//go:build !consulent +// +build !consulent + +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/structs" +) + +func TestEventPayloadCheckServiceNode_Subject_OSS(t *testing.T) { + for desc, tc := range map[string]struct { + evt EventPayloadCheckServiceNode + sub string + }{ + "mixed casing": { + EventPayloadCheckServiceNode{ + Value: &structs.CheckServiceNode{ + Service: &structs.NodeService{ + Service: "FoO", + }, + }, + }, + "foo", + }, + "override key": { + EventPayloadCheckServiceNode{ + Value: &structs.CheckServiceNode{ + Service: &structs.NodeService{ + Service: "foo", + }, + }, + overrideKey: "bar", + }, + "bar", + }, + } { + t.Run(desc, func(t *testing.T) { + require.Equal(t, tc.sub, tc.evt.Subject().String()) + }) + } +} diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index 1f6f6d885..0b455543e 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -16,49 +16,6 @@ import ( "github.com/hashicorp/consul/types" ) -func TestEventPayloadCheckServiceNode_Subject(t *testing.T) { - for desc, tc := range map[string]struct { - evt EventPayloadCheckServiceNode - sub string - }{ - "default partition and namespace": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - }, - "default/default/foo", - }, - "mixed casing": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "FoO", - }, - }, - }, - "default/default/foo", - }, - "override key": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - overrideKey: "bar", - }, - "default/default/bar", - }, - } { - t.Run(desc, func(t *testing.T) { - require.Equal(t, tc.sub, tc.evt.Subject().String()) - }) - } -} - func TestServiceHealthSnapshot(t *testing.T) { store := NewStateStore(nil) @@ -307,7 +264,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { return nil }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil, "") }, WantEvents: []stream.Event{ // Should only publish deregistration for that service @@ -327,7 +284,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { return nil }, Mutate: func(s *Store, tx *txn) error { - return s.deleteNodeTxn(tx, tx.Index, "node1", nil) + return s.deleteNodeTxn(tx, tx.Index, "node1", nil, "") }, WantEvents: []stream.Event{ // Should publish deregistration events for all services @@ -380,7 +337,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regConnectNative), false) }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil, "") }, WantEvents: 
[]stream.Event{ // We should see both a regular service dereg event and a connect one @@ -444,7 +401,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { }, Mutate: func(s *Store, tx *txn) error { // Delete only the sidecar - return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil, "") }, WantEvents: []stream.Event{ // We should see both a regular service dereg event and a connect one @@ -910,7 +867,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { }, Mutate: func(s *Store, tx *txn) error { // Delete only the node-level check - if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil); err != nil { + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil, ""); err != nil { return err } return nil @@ -964,11 +921,11 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { }, Mutate: func(s *Store, tx *txn) error { // Delete the service-level check for the main service - if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil); err != nil { + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil, ""); err != nil { return err } // Also delete for a proxy - if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil); err != nil { + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil, ""); err != nil { return err } return nil @@ -1029,10 +986,10 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { // In one transaction the operator moves the web service and it's // sidecar from node2 back to node1 and deletes them from node2 - if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil); err != nil { + if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil, ""); err != nil { return err } - if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil); err != nil { + if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil, ""); err != nil { return err } @@ -1544,7 +1501,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { testServiceRegistration(t, "tgate1", regTerminatingGateway), false) }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil, "") }, WantEvents: []stream.Event{ testServiceHealthDeregistrationEvent(t, "srv1"), @@ -1649,7 +1606,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { testServiceRegistration(t, "tgate1", regTerminatingGateway), false) }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMetaInDefaultPartition()) + return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMetaInDefaultPartition(), "") }, WantEvents: []stream.Event{ testServiceHealthDeregistrationEvent(t, diff --git a/agent/consul/state/catalog_oss.go b/agent/consul/state/catalog_oss.go index 8a30d4589..b0c0c5337 100644 --- a/agent/consul/state/catalog_oss.go +++ b/agent/consul/state/catalog_oss.go @@ -15,54 +15,83 @@ import ( func withEnterpriseSchema(_ *memdb.DBSchema) {} -func serviceIndexName(name string, _ *acl.EnterpriseMeta) string { - return fmt.Sprintf("service.%s", name) +func serviceIndexName(name string, _ *acl.EnterpriseMeta, peerName string) string { + return peeredIndexEntryName(fmt.Sprintf("service.%s", name), peerName) } -func serviceKindIndexName(kind structs.ServiceKind, _ 
*acl.EnterpriseMeta) string { - return "service_kind." + kind.Normalized() +func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) string { + base := "service_kind." + kind.Normalized() + return peeredIndexEntryName(base, peerName) } -func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *acl.EnterpriseMeta) error { +func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { // overall nodes index if err := indexUpdateMaxTxn(tx, idx, tableNodes); err != nil { return fmt.Errorf("failed updating index: %s", err) } + // peered index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableNodes, peerName)); err != nil { + return fmt.Errorf("failed updating partitioned+peered index for nodes table: %w", err) + } + return nil } -func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { +// catalogUpdateServicesIndexes upserts the max index for the entire services table with varying levels +// of granularity (no-op if `idx` is lower than what exists for that index key): +// - all services +// - all services in a specified peer (including internal) +func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { // overall services index if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil { - return fmt.Errorf("failed updating index: %s", err) + return fmt.Errorf("failed updating index for services table: %w", err) + } + + // peered services index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableServices, peerName)); err != nil { + return fmt.Errorf("failed updating peered index for services table: %w", err) } return nil } -func catalogUpdateServiceKindIndexes(tx WriteTxn, kind structs.ServiceKind, idx uint64, _ *acl.EnterpriseMeta) error { +// catalogUpdateServiceKindIndexes upserts the max index for the ServiceKind with varying levels +// of granularity (no-op if `idx` is lower than what exists for that index key): +// - all services of ServiceKind +// - all services of ServiceKind in a specified peer (including internal) +func catalogUpdateServiceKindIndexes(tx WriteTxn, idx uint64, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) error { + base := "service_kind." 
+ kind.Normalized() // service-kind index - if err := indexUpdateMaxTxn(tx, idx, serviceKindIndexName(kind, nil)); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := indexUpdateMaxTxn(tx, idx, base); err != nil { + return fmt.Errorf("failed updating index for service kind: %w", err) } + // peered index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(base, peerName)); err != nil { + return fmt.Errorf("failed updating peered index for service kind: %w", err) + } return nil } -func catalogUpdateServiceIndexes(tx WriteTxn, serviceName string, idx uint64, _ *acl.EnterpriseMeta) error { +func catalogUpdateServiceIndexes(tx WriteTxn, idx uint64, serviceName string, _ *acl.EnterpriseMeta, peerName string) error { // per-service index - if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil)); err != nil { + if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil, peerName)); err != nil { return fmt.Errorf("failed updating index: %s", err) } return nil } -func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { - if err := tx.Insert(tableIndex, &IndexEntry{indexServiceExtinction, idx}); err != nil { - return fmt.Errorf("failed updating missing service extinction index: %s", err) +func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { + if err := indexUpdateMaxTxn(tx, idx, indexServiceExtinction); err != nil { + return fmt.Errorf("failed updating missing service extinction index: %w", err) } + // update the peer index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(indexServiceExtinction, peerName)); err != nil { + return fmt.Errorf("failed updating missing service extinction peered index: %w", err) + } + return nil } @@ -75,14 +104,14 @@ func catalogInsertNode(tx WriteTxn, node *structs.Node) error { return fmt.Errorf("failed inserting node: %s", err) } - if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta()); err != nil { + if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta(), node.PeerName); err != nil { return err } // Update the node's service indexes as the node information is included // in health queries and we would otherwise miss node updates in some cases // for those queries. 
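Every catalogUpdate*Indexes helper above follows the same two-step pattern: bump the table-wide entry in the index table, then bump a peer-scoped entry whose key comes from peeredIndexEntryName, which this patch defines elsewhere in the state package. A minimal sketch of that helper, assuming an illustrative "peer.<name>:<entry>" key format rather than the patch's literal code:

// Sketch only: derive a peer-scoped key for the index table. Local
// (non-peered) data is filed under structs.LocalPeerKeyword so it can
// never collide with a real peer name.
func peeredIndexEntryName(entry, peerName string) string {
	if peerName == "" {
		peerName = structs.LocalPeerKeyword
	}
	return fmt.Sprintf("peer.%s:%s", peerName, entry)
}

Readers such as catalogServicesMaxIndex below then watch only the peer-scoped entry, so a blocking query against one peer's view of the catalog is not woken by writes belonging to another peer.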
- if err := updateAllServiceIndexesOfNode(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta()); err != nil { + if err := updateAllServiceIndexesOfNode(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta(), node.PeerName); err != nil { return fmt.Errorf("failed updating index: %s", err) } @@ -95,73 +124,95 @@ func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error { return fmt.Errorf("failed inserting service: %s", err) } - if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, svc.ModifyIndex, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, svc.ModifyIndex, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } return nil } -func catalogNodesMaxIndex(tx ReadTxn, entMeta *acl.EnterpriseMeta) uint64 { - return maxIndexTxn(tx, tableNodes) +func catalogNodesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexTxn(tx, peeredIndexEntryName(tableNodes, peerName)) } -func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 { - return maxIndexTxn(tx, tableServices) +func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexTxn(tx, peeredIndexEntryName(tableServices, peerName)) } -func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { - return tx.FirstWatch(tableIndex, "id", serviceIndexName(serviceName, nil)) +func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta, peerName string) (<-chan struct{}, interface{}, error) { + return tx.FirstWatch(tableIndex, indexID, serviceIndexName(serviceName, nil, peerName)) } -func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) uint64 { - return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil)) +func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil, peerName)) } -func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { - return tx.Get(tableServices, indexID) -} - -func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { - return tx.Get(tableServices, indexNode, Query{Value: node}) -} - -func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta) (interface{}, error) { - return tx.First(tableIndex, "id", indexServiceExtinction) -} - -func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, checks bool) uint64 { - if checks { - return maxIndexTxn(tx, tableNodes, tableServices, tableChecks) +func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) { + q := Query{ + PeerName: peerName, } - return maxIndexTxn(tx, tableNodes, tableServices) + return tx.Get(tableServices, indexID+"_prefix", q) } -func catalogMaxIndexWatch(tx ReadTxn, 
ws memdb.WatchSet, _ *acl.EnterpriseMeta, checks bool) uint64 { +func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, peerName string, _ bool) (memdb.ResultIterator, error) { + return tx.Get(tableServices, indexNode, Query{Value: node, PeerName: peerName}) +} + +func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) (interface{}, error) { + return tx.First(tableIndex, indexID, peeredIndexEntryName(indexServiceExtinction, peerName)) +} + +func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 { if checks { - return maxIndexWatchTxn(tx, ws, tableNodes, tableServices, tableChecks) + return maxIndexTxn(tx, + peeredIndexEntryName(tableChecks, peerName), + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) } - return maxIndexWatchTxn(tx, ws, tableNodes, tableServices) + return maxIndexTxn(tx, + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) } -func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { +func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 { + // TODO(peering_indexes): pipe peerName here + if checks { + return maxIndexWatchTxn(tx, ws, + peeredIndexEntryName(tableChecks, peerName), + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) + } + return maxIndexWatchTxn(tx, ws, + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) +} + +func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { // update the universal index entry - if err := tx.Insert(tableIndex, &IndexEntry{tableChecks, idx}); err != nil { + if err := indexUpdateMaxTxn(tx, idx, tableChecks); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableChecks, peerName)); err != nil { return fmt.Errorf("failed updating index: %s", err) } return nil } -func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 { - return maxIndexTxn(tx, tableChecks) +func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexTxn(tx, peeredIndexEntryName(tableChecks, peerName)) } func catalogListChecksByNode(tx ReadTxn, q Query) (memdb.ResultIterator, error) { @@ -174,7 +225,7 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error return fmt.Errorf("failed inserting check: %s", err) } - if err := catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta); err != nil { + if err := catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta, chk.PeerName); err != nil { return err } @@ -207,3 +258,10 @@ func indexFromKindServiceName(arg interface{}) ([]byte, error) { return nil, fmt.Errorf("type must be KindServiceNameQuery or *KindServiceName: %T", arg) } } + +func updateKindServiceNamesIndex(tx WriteTxn, idx uint64, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) error { + if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind.Normalized())); err != nil { + return fmt.Errorf("failed updating %s table index: %v", tableKindServiceNames, err) + } + return nil +} diff --git a/agent/consul/state/catalog_oss_test.go b/agent/consul/state/catalog_oss_test.go index 9edaff833..7ed7429fc 100644 --- a/agent/consul/state/catalog_oss_test.go +++ 
b/agent/consul/state/catalog_oss_test.go @@ -19,6 +19,14 @@ func testIndexerTableChecks() map[string]indexerTestCase { CheckID: "CheckID", Status: "PASSING", } + objWPeer := &structs.HealthCheck{ + Node: "NoDe", + ServiceID: "SeRvIcE", + ServiceName: "ServiceName", + CheckID: "CheckID", + Status: "PASSING", + PeerName: "Peer1", + } return map[string]indexerTestCase{ indexID: { read: indexValue{ @@ -26,11 +34,11 @@ func testIndexerTableChecks() map[string]indexerTestCase { Node: "NoDe", CheckID: "CheckId", }, - expected: []byte("node\x00checkid\x00"), + expected: []byte("internal\x00node\x00checkid\x00"), }, write: indexValue{ source: obj, - expected: []byte("node\x00checkid\x00"), + expected: []byte("internal\x00node\x00checkid\x00"), }, prefix: []indexValue{ { @@ -39,28 +47,75 @@ func testIndexerTableChecks() map[string]indexerTestCase { }, { source: Query{Value: "nOdE"}, - expected: []byte("node\x00"), + expected: []byte("internal\x00node\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: NodeCheckQuery{ + Node: "NoDe", + CheckID: "CheckId", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00node\x00checkid\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00node\x00checkid\x00"), + }, + prefix: []indexValue{ + { + source: Query{Value: "nOdE", + PeerName: "Peer1"}, + expected: []byte("peer1\x00node\x00"), + }, + }, }, }, }, indexStatus: { read: indexValue{ source: Query{Value: "PASSING"}, - expected: []byte("passing\x00"), + expected: []byte("internal\x00passing\x00"), }, write: indexValue{ source: obj, - expected: []byte("passing\x00"), + expected: []byte("internal\x00passing\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "PASSING", PeerName: "Peer1"}, + expected: []byte("peer1\x00passing\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00passing\x00"), + }, + }, }, }, indexService: { read: indexValue{ source: Query{Value: "ServiceName"}, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), }, write: indexValue{ source: obj, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "ServiceName", PeerName: "Peer1"}, + expected: []byte("peer1\x00servicename\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00servicename\x00"), + }, + }, }, }, indexNodeService: { @@ -69,11 +124,27 @@ func testIndexerTableChecks() map[string]indexerTestCase { Node: "NoDe", Service: "SeRvIcE", }, - expected: []byte("node\x00service\x00"), + expected: []byte("internal\x00node\x00service\x00"), }, write: indexValue{ source: obj, - expected: []byte("node\x00service\x00"), + expected: []byte("internal\x00node\x00service\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: NodeServiceQuery{ + Node: "NoDe", + PeerName: "Peer1", + Service: "SeRvIcE", + }, + expected: []byte("peer1\x00node\x00service\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00node\x00service\x00"), + }, + }, }, }, indexNode: { @@ -81,11 +152,26 @@ func testIndexerTableChecks() map[string]indexerTestCase { source: Query{ Value: "NoDe", }, - expected: []byte("node\x00"), + expected: []byte("internal\x00node\x00"), }, write: indexValue{ source: obj, - expected: []byte("node\x00"), + expected: []byte("internal\x00node\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + 
source: Query{ + Value: "NoDe", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00node\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00node\x00"), + }, + }, }, }, } @@ -186,11 +272,11 @@ func testIndexerTableNodes() map[string]indexerTestCase { indexID: { read: indexValue{ source: Query{Value: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), }, write: indexValue{ source: &structs.Node{Node: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), }, prefix: []indexValue{ { @@ -203,38 +289,90 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, { source: Query{Value: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), + }, + { + source: Query{}, + expected: []byte("internal\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + write: indexValue{ + source: &structs.Node{Node: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + prefix: []indexValue{ + { + source: Query{PeerName: "Peer1"}, + expected: []byte("peer1\x00"), + }, + { + source: Query{Value: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + }, }, }, }, indexUUID: { read: indexValue{ source: Query{Value: uuid}, - expected: uuidBuf, + expected: append([]byte("internal\x00"), uuidBuf...), }, write: indexValue{ source: &structs.Node{ ID: types.NodeID(uuid), Node: "NoDeId", }, - expected: uuidBuf, + expected: append([]byte("internal\x00"), uuidBuf...), }, prefix: []indexValue{ - { - source: (*acl.EnterpriseMeta)(nil), - expected: nil, - }, - { - source: acl.EnterpriseMeta{}, - expected: nil, - }, { // partial length source: Query{Value: uuid[:6]}, - expected: uuidBuf[:3], + expected: append([]byte("internal\x00"), uuidBuf[:3]...), }, { // full length source: Query{Value: uuid}, - expected: uuidBuf, + expected: append([]byte("internal\x00"), uuidBuf...), + }, + { + source: Query{}, + expected: []byte("internal\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: uuid, PeerName: "Peer1"}, + expected: append([]byte("peer1\x00"), uuidBuf...), + }, + write: indexValue{ + source: &structs.Node{ + ID: types.NodeID(uuid), + PeerName: "Peer1", + Node: "NoDeId", + }, + expected: append([]byte("peer1\x00"), uuidBuf...), + }, + prefix: []indexValue{ + { // partial length + source: Query{Value: uuid[:6], PeerName: "Peer1"}, + expected: append([]byte("peer1\x00"), uuidBuf[:3]...), + }, + { // full length + source: Query{Value: uuid, PeerName: "Peer1"}, + expected: append([]byte("peer1\x00"), uuidBuf...), + }, + { + source: Query{PeerName: "Peer1"}, + expected: []byte("peer1\x00"), + }, + }, }, }, }, @@ -244,7 +382,7 @@ func testIndexerTableNodes() map[string]indexerTestCase { Key: "KeY", Value: "VaLuE", }, - expected: []byte("KeY\x00VaLuE\x00"), + expected: []byte("internal\x00KeY\x00VaLuE\x00"), }, writeMulti: indexValueMulti{ source: &structs.Node{ @@ -255,8 +393,34 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, }, expected: [][]byte{ - []byte("MaP-kEy-1\x00mAp-VaL-1\x00"), - []byte("mAp-KeY-2\x00MaP-vAl-2\x00"), + []byte("internal\x00MaP-kEy-1\x00mAp-VaL-1\x00"), + []byte("internal\x00mAp-KeY-2\x00MaP-vAl-2\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: KeyValueQuery{ + Key: "KeY", + Value: "VaLuE", + PeerName: "Peer1", + }, + 
expected: []byte("peer1\x00KeY\x00VaLuE\x00"), + }, + writeMulti: indexValueMulti{ + source: &structs.Node{ + Node: "NoDeId", + Meta: map[string]string{ + "MaP-kEy-1": "mAp-VaL-1", + "mAp-KeY-2": "MaP-vAl-2", + }, + PeerName: "Peer1", + }, + expected: [][]byte{ + []byte("peer1\x00MaP-kEy-1\x00mAp-VaL-1\x00"), + []byte("peer1\x00mAp-KeY-2\x00MaP-vAl-2\x00"), + }, + }, }, }, }, @@ -271,6 +435,12 @@ func testIndexerTableServices() map[string]indexerTestCase { ServiceID: "SeRviCe", ServiceName: "ServiceName", } + objWPeer := &structs.ServiceNode{ + Node: "NoDeId", + ServiceID: "SeRviCe", + ServiceName: "ServiceName", + PeerName: "Peer1", + } return map[string]indexerTestCase{ indexID: { @@ -279,11 +449,11 @@ func testIndexerTableServices() map[string]indexerTestCase { Node: "NoDeId", Service: "SeRvIcE", }, - expected: []byte("nodeid\x00service\x00"), + expected: []byte("internal\x00nodeid\x00service\x00"), }, write: indexValue{ source: obj, - expected: []byte("nodeid\x00service\x00"), + expected: []byte("internal\x00nodeid\x00service\x00"), }, prefix: []indexValue{ { @@ -294,9 +464,39 @@ func testIndexerTableServices() map[string]indexerTestCase { source: acl.EnterpriseMeta{}, expected: nil, }, + { + source: Query{}, + expected: []byte("internal\x00"), + }, { source: Query{Value: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: NodeServiceQuery{ + Node: "NoDeId", + PeerName: "Peer1", + Service: "SeRvIcE", + }, + expected: []byte("peer1\x00nodeid\x00service\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00nodeid\x00service\x00"), + }, + prefix: []indexValue{ + { + source: Query{Value: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + { + source: Query{PeerName: "Peer1"}, + expected: []byte("peer1\x00"), + }, + }, }, }, }, @@ -305,34 +505,61 @@ func testIndexerTableServices() map[string]indexerTestCase { source: Query{ Value: "NoDeId", }, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), }, write: indexValue{ source: obj, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{ + Value: "NoDeId", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00nodeid\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00nodeid\x00"), + }, + }, }, }, indexService: { read: indexValue{ source: Query{Value: "ServiceName"}, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), }, write: indexValue{ source: obj, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "ServiceName", PeerName: "Peer1"}, + expected: []byte("peer1\x00servicename\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00servicename\x00"), + }, + }, }, }, indexConnect: { read: indexValue{ source: Query{Value: "ConnectName"}, - expected: []byte("connectname\x00"), + expected: []byte("internal\x00connectname\x00"), }, write: indexValue{ source: &structs.ServiceNode{ ServiceName: "ConnectName", ServiceConnect: structs.ServiceConnect{Native: true}, }, - expected: []byte("connectname\x00"), + expected: []byte("internal\x00connectname\x00"), }, extra: []indexerTestCase{ { @@ -344,7 +571,20 @@ func testIndexerTableServices() 
map[string]indexerTestCase { DestinationServiceName: "ConnectName", }, }, - expected: []byte("connectname\x00"), + expected: []byte("internal\x00connectname\x00"), + }, + }, + { + write: indexValue{ + source: &structs.ServiceNode{ + ServiceName: "ServiceName", + ServiceKind: structs.ServiceKindConnectProxy, + ServiceProxy: structs.ConnectProxyConfig{ + DestinationServiceName: "ConnectName", + }, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00connectname\x00"), }, }, { @@ -362,18 +602,32 @@ func testIndexerTableServices() map[string]indexerTestCase { expectedIndexMissing: true, }, }, + { + read: indexValue{ + source: Query{Value: "ConnectName", PeerName: "Peer1"}, + expected: []byte("peer1\x00connectname\x00"), + }, + write: indexValue{ + source: &structs.ServiceNode{ + ServiceName: "ConnectName", + ServiceConnect: structs.ServiceConnect{Native: true}, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00connectname\x00"), + }, + }, }, }, indexKind: { read: indexValue{ source: Query{Value: "connect-proxy"}, - expected: []byte("connect-proxy\x00"), + expected: []byte("internal\x00connect-proxy\x00"), }, write: indexValue{ source: &structs.ServiceNode{ ServiceKind: structs.ServiceKindConnectProxy, }, - expected: []byte("connect-proxy\x00"), + expected: []byte("internal\x00connect-proxy\x00"), }, extra: []indexerTestCase{ { @@ -382,7 +636,30 @@ func testIndexerTableServices() map[string]indexerTestCase { ServiceName: "ServiceName", ServiceKind: structs.ServiceKindTypical, }, - expected: []byte("\x00"), + expected: []byte("internal\x00\x00"), + }, + }, + { + write: indexValue{ + source: &structs.ServiceNode{ + ServiceName: "ServiceName", + ServiceKind: structs.ServiceKindTypical, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00\x00"), + }, + }, + { + read: indexValue{ + source: Query{Value: "connect-proxy", PeerName: "Peer1"}, + expected: []byte("peer1\x00connect-proxy\x00"), + }, + write: indexValue{ + source: &structs.ServiceNode{ + ServiceKind: structs.ServiceKindConnectProxy, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00connect-proxy\x00"), }, }, }, @@ -440,7 +717,7 @@ func testIndexerTableKindServiceNames() map[string]indexerTestCase { }, indexKind: { read: indexValue{ - source: structs.ServiceKindConnectProxy, + source: Query{Value: string(structs.ServiceKindConnectProxy)}, expected: []byte("connect-proxy\x00"), }, write: indexValue{ diff --git a/agent/consul/state/catalog_schema.go b/agent/consul/state/catalog_schema.go index b2d0907dc..9a2fbecad 100644 --- a/agent/consul/state/catalog_schema.go +++ b/agent/consul/state/catalog_schema.go @@ -48,9 +48,9 @@ func nodesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromQuery, - writeIndex: indexFromNode, - prefixIndex: prefixIndexFromQueryNoNamespace, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexFromNode), + prefixIndex: prefixIndexFromQueryWithPeer, }, }, indexUUID: { @@ -58,9 +58,9 @@ func nodesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromUUIDQuery, - writeIndex: indexIDFromNode, - prefixIndex: prefixIndexFromUUIDQuery, + readIndex: indexWithPeerName(indexFromUUIDQuery), + writeIndex: indexWithPeerName(indexIDFromNode), + prefixIndex: prefixIndexFromUUIDWithPeerQuery, }, }, indexMeta: { @@ -68,8 +68,8 @@ func nodesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerMulti{ - readIndex: 
indexFromKeyValueQuery, - writeIndexMulti: indexMetaFromNode, + readIndex: indexWithPeerName(indexFromKeyValueQuery), + writeIndexMulti: multiIndexWithPeerName(indexMetaFromNode), }, }, }, @@ -146,9 +146,9 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromNodeServiceQuery, - writeIndex: indexFromServiceNode, - prefixIndex: prefixIndexFromQuery, + readIndex: indexWithPeerName(indexFromNodeServiceQuery), + writeIndex: indexWithPeerName(indexFromServiceNode), + prefixIndex: prefixIndexFromQueryWithPeer, }, }, indexNode: { @@ -156,8 +156,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexFromNodeIdentity, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexFromNodeIdentity), }, }, indexService: { @@ -165,8 +165,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexServiceNameFromServiceNode, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexServiceNameFromServiceNode), }, }, indexConnect: { @@ -174,8 +174,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexConnectNameFromServiceNode, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexConnectNameFromServiceNode), }, }, indexKind: { @@ -183,8 +183,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexKindFromServiceNode, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexKindFromServiceNode), }, }, }, @@ -295,6 +295,61 @@ func indexKindFromServiceNode(raw interface{}) ([]byte, error) { return b.Bytes(), nil } +// indexWithPeerName adds peer name to the index. +func indexWithPeerName( + fn func(interface{}) ([]byte, error), +) func(interface{}) ([]byte, error) { + return func(raw interface{}) ([]byte, error) { + v, err := fn(raw) + if err != nil { + return nil, err + } + + n, ok := raw.(peerIndexable) + if !ok { + return nil, fmt.Errorf("type must be peerIndexable: %T", raw) + } + + peername := n.PeerOrEmpty() + if peername == "" { + peername = structs.LocalPeerKeyword + } + b := newIndexBuilder(len(v) + len(peername) + 1) + b.String(strings.ToLower(peername)) + b.Raw(v) + return b.Bytes(), nil + } +} + +// multiIndexWithPeerName adds peer name to multiple indices, and returns multiple indices. +func multiIndexWithPeerName( + fn func(interface{}) ([][]byte, error), +) func(interface{}) ([][]byte, error) { + return func(raw interface{}) ([][]byte, error) { + results, err := fn(raw) + if err != nil { + return nil, err + } + + n, ok := raw.(peerIndexable) + if !ok { + return nil, fmt.Errorf("type must be peerIndexable: %T", raw) + } + + peername := n.PeerOrEmpty() + if peername == "" { + peername = structs.LocalPeerKeyword + } + for i, v := range results { + b := newIndexBuilder(len(v) + len(peername) + 1) + b.String(strings.ToLower(peername)) + b.Raw(v) + results[i] = b.Bytes() + } + return results, nil + } +} + // checksTableSchema returns a new table schema used for storing and indexing // health check information. 
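To make the two decorators above concrete: indexWithPeerName prepends the lowercased, null-terminated peer name to whatever the wrapped indexer emits, falling back to structs.LocalPeerKeyword (the "internal" prefix visible in the indexer test expectations above) when the object or query carries no peer. A hypothetical composition, assuming Query's PeerOrEmpty implementation from this patch's query_oss.go:

// Illustration only: wrap an existing read indexer so its keys become peer-scoped.
readNode := indexWithPeerName(indexFromQuery)

local, _ := readNode(Query{Value: "NoDe"})                     // "internal\x00node\x00"
peered, _ := readNode(Query{Value: "NoDe", PeerName: "Peer1"}) // "peer1\x00node\x00"

Because every key for a given peer shares the "<peer>\x00" prefix, radix-tree prefix iteration over one peer's data can never cross into another peer's entries.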
Health checks have a number of different attributes // we want to filter by, so this table is a bit more complex. @@ -307,9 +362,9 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromNodeCheckQuery, - writeIndex: indexFromHealthCheck, - prefixIndex: prefixIndexFromQuery, + readIndex: indexWithPeerName(indexFromNodeCheckQuery), + writeIndex: indexWithPeerName(indexFromHealthCheck), + prefixIndex: prefixIndexFromQueryWithPeer, }, }, indexStatus: { @@ -317,8 +372,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexStatusFromHealthCheck, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexStatusFromHealthCheck), }, }, indexService: { @@ -326,8 +381,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexServiceNameFromHealthCheck, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexServiceNameFromHealthCheck), }, }, indexNode: { @@ -335,8 +390,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexFromNodeIdentity, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexFromNodeIdentity), }, }, indexNodeService: { @@ -344,8 +399,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromNodeServiceQuery, - writeIndex: indexNodeServiceFromHealthCheck, + readIndex: indexWithPeerName(indexFromNodeServiceQuery), + writeIndex: indexWithPeerName(indexNodeServiceFromHealthCheck), }, }, }, @@ -588,11 +643,20 @@ type upstreamDownstream struct { // NodeCheckQuery is used to query the ID index of the checks table. type NodeCheckQuery struct { - Node string - CheckID string + Node string + CheckID string + PeerName string acl.EnterpriseMeta } +type peerIndexable interface { + PeerOrEmpty() string +} + +func (q NodeCheckQuery) PeerOrEmpty() string { + return q.PeerName +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. 
func (q NodeCheckQuery) NamespaceOrDefault() string { @@ -680,7 +744,16 @@ type KindServiceName struct { structs.RaftIndex } +func (n *KindServiceName) PartitionOrDefault() string { + return n.Service.PartitionOrDefault() +} + +func (n *KindServiceName) NamespaceOrDefault() string { + return n.Service.NamespaceOrDefault() +} + func kindServiceNameTableSchema() *memdb.TableSchema { + // TODO(peering): make this peer-aware return &memdb.TableSchema{ Name: tableKindServiceNames, Indexes: map[string]*memdb.IndexSchema{ @@ -693,8 +766,8 @@ func kindServiceNameTableSchema() *memdb.TableSchema { writeIndex: indexFromKindServiceName, }, }, - indexKindOnly: { - Name: indexKindOnly, + indexKind: { + Name: indexKind, AllowMissing: false, Unique: false, Indexer: indexerSingle{ @@ -732,20 +805,20 @@ func indexFromKindServiceNameKindOnly(raw interface{}) ([]byte, error) { b.String(strings.ToLower(string(x.Kind))) return b.Bytes(), nil - case structs.ServiceKind: + case Query: var b indexBuilder - b.String(strings.ToLower(string(x))) + b.String(strings.ToLower(x.Value)) return b.Bytes(), nil default: - return nil, fmt.Errorf("type must be *KindServiceName or structs.ServiceKind: %T", raw) + return nil, fmt.Errorf("type must be *KindServiceName or Query: %T", raw) } } -func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) uint64 { +func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind string) uint64 { return maxIndexWatchTxn(tx, ws, kindServiceNameIndexName(kind)) } -func kindServiceNameIndexName(kind structs.ServiceKind) string { - return "kind_service_names." + kind.Normalized() +func kindServiceNameIndexName(kind string) string { + return "kind_service_names." + kind } diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index aee7781a0..efd862838 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/types" ) @@ -30,21 +31,21 @@ func makeRandomNodeID(t *testing.T) types.NodeID { func TestStateStore_GetNodeID(t *testing.T) { s := testStateStore(t) - _, out, err := s.GetNodeID(types.NodeID("wrongId"), nil) + _, out, err := s.GetNodeID(types.NodeID("wrongId"), nil, "") if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: UUID (without hyphens) must be") { t.Errorf("want an error, nil value, err:=%q ; out:=%q", err.Error(), out) } - _, out, err = s.GetNodeID(types.NodeID("0123456789abcdefghijklmnopqrstuvwxyz"), nil) + _, out, err = s.GetNodeID(types.NodeID("0123456789abcdefghijklmnopqrstuvwxyz"), nil, "") if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: invalid UUID") { t.Errorf("want an error, nil value, err:=%q ; out:=%q", err, out) } - _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee50Z"), nil) + _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee50Z"), nil, "") if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: invalid UUID") { t.Errorf("want an error, nil value, err:=%q ; out:=%q", err, out) } - _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee506"), nil) + _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee506"), nil, "") if err 
!= nil || out != nil { t.Errorf("do not want any error nor returned value, err:=%q ; out:=%q", err, out) } @@ -57,14 +58,14 @@ func TestStateStore_GetNodeID(t *testing.T) { } require.NoError(t, s.EnsureRegistration(1, req)) - _, out, err = s.GetNodeID(nodeID, nil) + _, out, err = s.GetNodeID(nodeID, nil, "") require.NoError(t, err) if out == nil || out.ID != nodeID { t.Fatalf("out should not be nil and contain nodeId, but was:=%#v", out) } // Case insensitive lookup should work as well - _, out, err = s.GetNodeID(types.NodeID("00a916bC-a357-4a19-b886-59419fceeAAA"), nil) + _, out, err = s.GetNodeID(types.NodeID("00a916bC-a357-4a19-b886-59419fceeAAA"), nil, "") require.NoError(t, err) if out == nil || out.ID != nodeID { t.Fatalf("out should not be nil and contain nodeId, but was:=%#v", out) @@ -72,30 +73,59 @@ func TestStateStore_GetNodeID(t *testing.T) { } func TestStateStore_GetNode(t *testing.T) { - s := testStateStore(t) + assertExists := func(t *testing.T, s *Store, node, peerName string, expectIndex uint64) { + idx, out, err := s.GetNode(node, nil, peerName) + require.NoError(t, err) + require.NotNil(t, out) + require.Equal(t, expectIndex, idx) + require.Equal(t, strings.ToLower(node), out.Node) + require.Equal(t, strings.ToLower(peerName), out.PeerName) + } + assertNotExist := func(t *testing.T, s *Store, node, peerName string) { + idx, out, err := s.GetNode(node, nil, peerName) + require.NoError(t, err) + require.Nil(t, out) + require.Equal(t, uint64(0), idx) + } - // initially does not exist - idx, out, err := s.GetNode("node1", nil) - require.NoError(t, err) - require.Nil(t, out) - require.Equal(t, uint64(0), idx) + t.Run("default peer", func(t *testing.T) { + s := testStateStore(t) - // Create it - testRegisterNode(t, s, 1, "node1") + // initially does not exist + assertNotExist(t, s, "node1", "") - // now exists - idx, out, err = s.GetNode("node1", nil) - require.NoError(t, err) - require.NotNil(t, out) - require.Equal(t, uint64(1), idx) - require.Equal(t, "node1", out.Node) + // Create it + testRegisterNode(t, s, 1, "node1") - // Case insensitive lookup should work as well - idx, out, err = s.GetNode("NoDe1", nil) - require.NoError(t, err) - require.NotNil(t, out) - require.Equal(t, uint64(1), idx) - require.Equal(t, "node1", out.Node) + // now exists + assertExists(t, s, "node1", "", 1) + + // Case insensitive lookup should work as well + assertExists(t, s, "NoDe1", "", 1) + }) + + t.Run("random peer", func(t *testing.T) { + s := testStateStore(t) + + // initially do not exist + assertNotExist(t, s, "node1", "") + assertNotExist(t, s, "node1", "my-peer") + + // Create one with no peer, and one with a peer to test a peer-name crossing issue. + testRegisterNode(t, s, 1, "node1") + testRegisterNodeOpts(t, s, 2, "node1", func(n *structs.Node) error { + n.PeerName = "my-peer" + return nil + }) + + // now exist + assertExists(t, s, "node1", "", 1) + assertExists(t, s, "node1", "my-peer", 2) + + // Case insensitive lookup should work as well + assertExists(t, s, "NoDe1", "", 1) + assertExists(t, s, "NoDe1", "my-peer", 2) + }) } func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) { @@ -169,427 +199,570 @@ func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) { func TestStateStore_EnsureRegistration(t *testing.T) { t.Parallel() - s := testStateStore(t) - // Start with just a node. 
- nodeID := makeRandomNodeID(t) - req := &structs.RegisterRequest{ - ID: nodeID, - Node: "node1", - Address: "1.2.3.4", - TaggedAddresses: map[string]string{"hello": "world"}, - NodeMeta: map[string]string{"somekey": "somevalue"}, - } - if err := s.EnsureRegistration(1, req); err != nil { - t.Fatalf("err: %s", err) - } + run := func(t *testing.T, peerName string) { + s := testStateStore(t) + // Start with just a node. + nodeID := makeRandomNodeID(t) - // Retrieve the node and verify its contents. - verifyNode := func() { - node := &structs.Node{ - ID: nodeID, - Node: "node1", - Address: "1.2.3.4", - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), - TaggedAddresses: map[string]string{"hello": "world"}, - Meta: map[string]string{"somekey": "somevalue"}, - RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 1}, + makeReq := func(f func(*structs.RegisterRequest)) *structs.RegisterRequest { + req := &structs.RegisterRequest{ + ID: nodeID, + Node: "node1", + Address: "1.2.3.4", + TaggedAddresses: map[string]string{"hello": "world"}, + NodeMeta: map[string]string{"somekey": "somevalue"}, + PeerName: peerName, + } + if f != nil { + f(req) + } + return req } - _, out, err := s.GetNode("node1", nil) - if err != nil { - t.Fatalf("got err %s want nil", err) - } - require.Equal(t, node, out) + verifyNode := func(t *testing.T) { + node := &structs.Node{ + ID: nodeID, + Node: "node1", + Address: "1.2.3.4", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + TaggedAddresses: map[string]string{"hello": "world"}, + Meta: map[string]string{"somekey": "somevalue"}, + RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 1}, + PeerName: peerName, + } - _, out2, err := s.GetNodeID(nodeID, nil) - if err != nil { - t.Fatalf("got err %s want nil", err) - } - if out2 == nil { - t.Fatalf("out2 should not be nil") - } - require.Equal(t, out, out2) - } - verifyNode() + _, out, err := s.GetNode("node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, node, out) - // Add in a invalid service definition with too long Key value for Meta - req.Service = &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Meta: map[string]string{strings.Repeat("a", 129): "somevalue"}, - Tags: []string{"primary"}, - } - if err := s.EnsureRegistration(9, req); err == nil { - t.Fatalf("Service should not have been registered since Meta is invalid") + _, out2, err := s.GetNodeID(nodeID, nil, peerName) + require.NoError(t, err) + require.NotNil(t, out2) + require.Equal(t, out, out2) + } + verifyService := func(t *testing.T) { + svcmap := map[string]*structs.NodeService{ + "redis1": { + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } + + idx, out, err := s.NodeServices(nil, "node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap, out.Services) + + idx, r, err := s.NodeService("node1", "redis1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap["redis1"], r) + + // lookup service by node name + idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, 
svcmap["redis1"].ToServiceNode("node1"), sn) + + // lookup service by node ID + idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + + // lookup service by invalid node + _, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName) + testutil.RequireErrorContains(t, err, "node not found") + + // lookup service without node name or ID + _, _, err = s.ServiceNode("", "", "redis1", nil, peerName) + testutil.RequireErrorContains(t, err, "Node ID or name required to lookup the service") + } + verifyCheck := func(t *testing.T) { + checks := structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + Status: "critical", + RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } + + idx, out, err := s.NodeChecks(nil, "node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(3), idx) + require.Equal(t, checks, out) + + idx, c, err := s.NodeCheck("node1", "check1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(3), idx) + require.Equal(t, checks[0], c) + } + verifyChecks := func(t *testing.T) { + checks := structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + Status: "critical", + RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + &structs.HealthCheck{ + Node: "node1", + CheckID: "check2", + Name: "check", + Status: "critical", + ServiceID: "redis1", + ServiceName: "redis", + ServiceTags: []string{"primary"}, + RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } + + idx, out, err := s.NodeChecks(nil, "node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(4), idx) + require.Equal(t, checks, out) + } + + runStep(t, "add a node", func(t *testing.T) { + req := makeReq(nil) + require.NoError(t, s.EnsureRegistration(1, req)) + + // Retrieve the node and verify its contents. + verifyNode(t) + }) + + runStep(t, "add a node with invalid meta", func(t *testing.T) { + // Add in a invalid service definition with too long Key value for Meta + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Meta: map[string]string{strings.Repeat("a", 129): "somevalue"}, + Tags: []string{"primary"}, + PeerName: peerName, + } + }) + testutil.RequireErrorContains(t, s.EnsureRegistration(9, req), `Key is too long (limit: 128 characters)`) + }) + + // Add in a service definition. + runStep(t, "add a service definition", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + }) + require.NoError(t, s.EnsureRegistration(2, req)) + + // Verify that the service got registered. + verifyNode(t) + verifyService(t) + }) + + // Add in a top-level check. 
+ runStep(t, "add a top level check", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + PeerName: peerName, + } + }) + require.NoError(t, s.EnsureRegistration(3, req)) + + // Verify that the check got registered. + verifyNode(t) + verifyService(t) + verifyCheck(t) + }) + + // Add a service check which should populate the ServiceName + // and ServiceTags fields in the response. + runStep(t, "add a service check", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check2", + Name: "check", + ServiceID: "redis1", + PeerName: peerName, + }, + } + }) + require.NoError(t, s.EnsureRegistration(4, req)) + + // Verify that the additional check got registered. + verifyNode(t) + verifyService(t) + verifyChecks(t) + }) + + // Try to register a check for some other node (top-level check). + runStep(t, "try to register a check for some other node via the top level check", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: "nope", + CheckID: "check1", + Name: "check", + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check2", + Name: "check", + ServiceID: "redis1", + PeerName: peerName, + }, + } + }) + testutil.RequireErrorContains(t, s.EnsureRegistration(5, req), `does not match node`) + verifyNode(t) + verifyService(t) + verifyChecks(t) + }) + + runStep(t, "try to register a check for some other node via the checks array", func(t *testing.T) { + // Try to register a check for some other node (checks array). + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: "nope", + CheckID: "check2", + Name: "check", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } + }) + testutil.RequireErrorContains(t, s.EnsureRegistration(6, req), `does not match node`) + verifyNode(t) + verifyService(t) + verifyChecks(t) + }) } - // Add in a service definition. 
- req.Service = &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Tags: []string{"primary"}, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - } - if err := s.EnsureRegistration(2, req); err != nil { - t.Fatalf("err: %s", err) - } + t.Run("default peer", func(t *testing.T) { + run(t, structs.DefaultPeerKeyword) + }) - // Verify that the service got registered. - verifyService := func() { - svcmap := map[string]*structs.NodeService{ - "redis1": { - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Tags: []string{"primary"}, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } - - idx, out, err := s.NodeServices(nil, "node1", nil) - if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, svcmap, out.Services) - - idx, r, err := s.NodeService("node1", "redis1", nil) - if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, svcmap["redis1"], r) - - // look service by node name - idx, sn, err := s.ServiceNode("", "node1", "redis1", nil) - if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) - - // lookup service by node ID - idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil) - if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) - - // lookup service by invalid node - _, _, err = s.ServiceNode("", "invalid-node", "redis1", nil) - if err != nil { - require.Equal(t, "node not found", err.Error()) - } - if err == nil { - t.Fatalf("Expected error on lookup of \"invalid-node\"") - } - // lookup service without node name or ID - _, _, err = s.ServiceNode("", "", "redis1", nil) - if err != nil { - require.Equal(t, "Node ID or name required to lookup the service", err.Error()) - } - if err == nil { - t.Fatalf("Expected error on lookup of service without node name or ID") - } - } - verifyNode() - verifyService() - - // Add in a top-level check. - req.Check = &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - } - if err := s.EnsureRegistration(3, req); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify that the check got registered. 
- verifyCheck := func() { - checks := structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - Status: "critical", - RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } - - idx, out, err := s.NodeChecks(nil, "node1", nil) - if gotidx, wantidx := idx, uint64(3); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, checks, out) - - idx, c, err := s.NodeCheck("node1", "check1", nil) - if gotidx, wantidx := idx, uint64(3); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, checks[0], c) - } - verifyNode() - verifyService() - verifyCheck() - - // Add a service check which should populate the ServiceName - // and ServiceTags fields in the response. - req.Checks = structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check2", - Name: "check", - ServiceID: "redis1", - }, - } - if err := s.EnsureRegistration(4, req); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify that the additional check got registered. - verifyNode() - verifyService() - verifyChecks := func() { - checks := structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - Status: "critical", - RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - &structs.HealthCheck{ - Node: "node1", - CheckID: "check2", - Name: "check", - Status: "critical", - ServiceID: "redis1", - ServiceName: "redis", - ServiceTags: []string{"primary"}, - RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } - - idx, out, err := s.NodeChecks(nil, "node1", nil) - if gotidx, wantidx := idx, uint64(4); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, checks, out) - } - verifyChecks() - - // Try to register a check for some other node (top-level check). - req.Check = &structs.HealthCheck{ - Node: "nope", - CheckID: "check1", - Name: "check", - } - err := s.EnsureRegistration(5, req) - if err == nil || !strings.Contains(err.Error(), "does not match node") { - t.Fatalf("err: %s", err) - } - verifyNode() - verifyService() - verifyChecks() - - // Try to register a check for some other node (checks array). - req.Check = nil - req.Checks = structs.HealthChecks{ - &structs.HealthCheck{ - Node: "nope", - CheckID: "check2", - Name: "check", - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } - err = s.EnsureRegistration(6, req) - if err == nil || !strings.Contains(err.Error(), "does not match node") { - t.Fatalf("err: %s", err) - } - verifyNode() - verifyService() - verifyChecks() + t.Run("random peer", func(t *testing.T) { + run(t, "my-peer") + }) } func TestStateStore_EnsureRegistration_Restore(t *testing.T) { - s := testStateStore(t) + const ( + nodeID = "099eac9d-8e3e-464b-b3f5-8d7dcfcf9f71" + nodeName = "node1" + ) - // Start with just a node. 
- req := &structs.RegisterRequest{ - ID: makeRandomNodeID(t), - Node: "node1", - Address: "1.2.3.4", - RaftIndex: structs.RaftIndex{ - CreateIndex: 1, - ModifyIndex: 1, - }, - } - nodeID := string(req.ID) - nodeName := req.Node - restore := s.Restore() - if err := restore.Registration(1, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Retrieve the node and verify its contents. - verifyNode := func(nodeLookup string) { - _, out, err := s.GetNode(nodeLookup, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if out == nil { - _, out, err = s.GetNodeID(types.NodeID(nodeLookup), nil) - if err != nil { - t.Fatalf("err: %s", err) + run := func(t *testing.T, peerName string) { + verifyNode := func(t *testing.T, s *Store, nodeLookup string) { + idx, out, err := s.GetNode(nodeLookup, nil, peerName) + require.NoError(t, err) + byID := false + if out == nil { + _, out, err = s.GetNodeID(types.NodeID(nodeLookup), nil, peerName) + require.NoError(t, err) + byID = true } + + require.NotNil(t, out) + require.Equal(t, uint64(1), idx) + + require.Equal(t, "1.2.3.4", out.Address) + if byID { + require.Equal(t, nodeLookup, string(out.ID)) + } else { + require.Equal(t, nodeLookup, out.Node) + } + require.Equal(t, peerName, out.PeerName) + require.Equal(t, uint64(1), out.CreateIndex) + require.Equal(t, uint64(1), out.ModifyIndex) + } + verifyService := func(t *testing.T, s *Store, nodeLookup string) { + idx, out, err := s.NodeServices(nil, nodeLookup, nil, peerName) + require.NoError(t, err) + + require.Len(t, out.Services, 1) + require.Equal(t, uint64(2), idx) + svc := out.Services["redis1"] + + require.Equal(t, "redis1", svc.ID) + require.Equal(t, "redis", svc.Service) + require.Equal(t, peerName, svc.PeerName) + require.Equal(t, "1.1.1.1", svc.Address) + require.Equal(t, 8080, svc.Port) + require.Equal(t, uint64(2), svc.CreateIndex) + require.Equal(t, uint64(2), svc.ModifyIndex) + } + verifyCheck := func(t *testing.T, s *Store) { + idx, out, err := s.NodeChecks(nil, nodeName, nil, peerName) + require.NoError(t, err) + + require.Len(t, out, 1) + require.Equal(t, uint64(3), idx) + + c := out[0] + + require.Equal(t, strings.ToUpper(nodeName), c.Node) + require.Equal(t, "check1", string(c.CheckID)) + require.Equal(t, "check", c.Name) + require.Equal(t, peerName, c.PeerName) + require.Equal(t, uint64(3), c.CreateIndex) + require.Equal(t, uint64(3), c.ModifyIndex) + } + verifyChecks := func(t *testing.T, s *Store) { + idx, out, err := s.NodeChecks(nil, nodeName, nil, peerName) + require.NoError(t, err) + + require.Len(t, out, 2) + require.Equal(t, uint64(4), idx) + + c1 := out[0] + require.Equal(t, strings.ToUpper(nodeName), c1.Node) + require.Equal(t, "check1", string(c1.CheckID)) + require.Equal(t, "check", c1.Name) + require.Equal(t, peerName, c1.PeerName) + require.Equal(t, uint64(3), c1.CreateIndex) + require.Equal(t, uint64(3), c1.ModifyIndex) + + c2 := out[1] + require.Equal(t, nodeName, c2.Node) + require.Equal(t, "check2", string(c2.CheckID)) + require.Equal(t, "check", c2.Name) + require.Equal(t, peerName, c2.PeerName) + require.Equal(t, uint64(4), c2.CreateIndex) + require.Equal(t, uint64(4), c2.ModifyIndex) } - if out == nil || out.Address != "1.2.3.4" || - !(out.Node == nodeLookup || string(out.ID) == nodeLookup) || - out.CreateIndex != 1 || out.ModifyIndex != 1 { - t.Fatalf("bad node returned: %#v", out) - } - } - verifyNode(nodeID) - verifyNode(nodeName) - - // Add in a service definition. 
- req.Service = &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - RaftIndex: structs.RaftIndex{ - CreateIndex: 2, - ModifyIndex: 2, - }, - } - restore = s.Restore() - if err := restore.Registration(2, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Verify that the service got registered. - verifyService := func(nodeLookup string) { - idx, out, err := s.NodeServices(nil, nodeLookup, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 2 { - t.Fatalf("bad index: %d", idx) - } - if len(out.Services) != 1 { - t.Fatalf("bad: %#v", out.Services) - } - s := out.Services["redis1"] - if s.ID != "redis1" || s.Service != "redis" || - s.Address != "1.1.1.1" || s.Port != 8080 || - s.CreateIndex != 2 || s.ModifyIndex != 2 { - t.Fatalf("bad service returned: %#v", s) - } - } - - // Add in a top-level check. - // - // Verify that node name references in checks are case-insensitive during - // restore. - req.Check = &structs.HealthCheck{ - Node: strings.ToUpper(nodeName), - CheckID: "check1", - Name: "check", - RaftIndex: structs.RaftIndex{ - CreateIndex: 3, - ModifyIndex: 3, - }, - } - restore = s.Restore() - if err := restore.Registration(3, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Verify that the check got registered. - verifyCheck := func() { - idx, out, err := s.NodeChecks(nil, nodeName, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 3 { - t.Fatalf("bad index: %d", idx) - } - if len(out) != 1 { - t.Fatalf("bad: %#v", out) - } - c := out[0] - if c.Node != strings.ToUpper(nodeName) || c.CheckID != "check1" || c.Name != "check" || - c.CreateIndex != 3 || c.ModifyIndex != 3 { - t.Fatalf("bad check returned: %#v", c) - } - } - verifyNode(nodeID) - verifyNode(nodeName) - verifyService(nodeID) - verifyService(nodeName) - verifyCheck() - - // Add in another check via the slice. - req.Checks = structs.HealthChecks{ - &structs.HealthCheck{ - Node: nodeName, - CheckID: "check2", - Name: "check", - RaftIndex: structs.RaftIndex{ - CreateIndex: 4, - ModifyIndex: 4, - }, - }, - } - restore = s.Restore() - if err := restore.Registration(4, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Verify that the additional check got registered. - verifyNode(nodeID) - verifyNode(nodeName) - verifyService(nodeID) - verifyService(nodeName) - func() { - idx, out, err := s.NodeChecks(nil, nodeName, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 4 { - t.Fatalf("bad index: %d", idx) - } - if len(out) != 2 { - t.Fatalf("bad: %#v", out) - } - c1 := out[0] - if c1.Node != strings.ToUpper(nodeName) || c1.CheckID != "check1" || c1.Name != "check" || - c1.CreateIndex != 3 || c1.ModifyIndex != 3 { - t.Fatalf("bad check returned, should not be modified: %#v", c1) + makeReq := func(f func(*structs.RegisterRequest)) *structs.RegisterRequest { + req := &structs.RegisterRequest{ + ID: types.NodeID(nodeID), + Node: nodeName, + Address: "1.2.3.4", + RaftIndex: structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + }, + PeerName: peerName, + } + if f != nil { + f(req) + } + return req } - c2 := out[1] - if c2.Node != nodeName || c2.CheckID != "check2" || c2.Name != "check" || - c2.CreateIndex != 4 || c2.ModifyIndex != 4 { - t.Fatalf("bad check returned: %#v", c2) - } - }() + s := testStateStore(t) + + // Start with just a node. 
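
Review note: each restore step below rebuilds its request from the same baseline and layers fields on through a mutation callback, so later steps cannot depend on state leaked from earlier ones. The underlying pattern, sketched with a hypothetical free-standing builder:

    // Sketch of the makeReq pattern: copy a fixed baseline request, then
    // apply an optional mutation. The value copy keeps steps independent
    // (pointer-typed fields inside the request are still shared).
    func buildReq(base structs.RegisterRequest, mut func(*structs.RegisterRequest)) *structs.RegisterRequest {
        req := base
        if mut != nil {
            mut(&req)
        }
        return &req
    }
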
+ runStep(t, "add a node", func(t *testing.T) { + req := makeReq(nil) + restore := s.Restore() + require.NoError(t, restore.Registration(1, req)) + require.NoError(t, restore.Commit()) + + // Retrieve the node and verify its contents. + verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + }) + + // Add in a service definition. + runStep(t, "add a service definition", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + PeerName: peerName, + } + }) + restore := s.Restore() + require.NoError(t, restore.Registration(2, req)) + require.NoError(t, restore.Commit()) + + // Verify that the service got registered. + verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + verifyService(t, s, nodeID) + verifyService(t, s, nodeName) + }) + + runStep(t, "add a top-level check", func(t *testing.T) { + // Add in a top-level check. + // + // Verify that node name references in checks are case-insensitive during + // restore. + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: strings.ToUpper(nodeName), + CheckID: "check1", + Name: "check", + RaftIndex: structs.RaftIndex{ + CreateIndex: 3, + ModifyIndex: 3, + }, + PeerName: peerName, + } + }) + restore := s.Restore() + require.NoError(t, restore.Registration(3, req)) + require.NoError(t, restore.Commit()) + + // Verify that the check got registered. + verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + verifyService(t, s, nodeID) + verifyService(t, s, nodeName) + verifyCheck(t, s) + }) + + runStep(t, "add another check via the slice", func(t *testing.T) { + // Add in another check via the slice. + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: strings.ToUpper(nodeName), + CheckID: "check1", + Name: "check", + RaftIndex: structs.RaftIndex{ + CreateIndex: 3, + ModifyIndex: 3, + }, + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: nodeName, + CheckID: "check2", + Name: "check", + RaftIndex: structs.RaftIndex{ + CreateIndex: 4, + ModifyIndex: 4, + }, + PeerName: peerName, + }, + } + }) + restore := s.Restore() + require.NoError(t, restore.Registration(4, req)) + require.NoError(t, restore.Commit()) + + // Verify that the additional check got registered. 
+ verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + verifyService(t, s, nodeID) + verifyService(t, s, nodeName) + verifyChecks(t, s) + }) + } + + t.Run("default peer", func(t *testing.T) { + run(t, structs.DefaultPeerKeyword) + }) + + t.Run("random peer", func(t *testing.T) { + run(t, "my-peer") + }) } func deprecatedEnsureNodeWithoutIDCanRegister(t *testing.T, s *Store, nodeName string, txIdx uint64) { @@ -604,7 +777,7 @@ func deprecatedEnsureNodeWithoutIDCanRegister(t *testing.T, s *Store, nodeName s if err := s.EnsureNode(txIdx, in); err != nil { t.Fatalf("err: %s", err) } - idx, out, err := s.GetNode(nodeName, nil) + idx, out, err := s.GetNode(nodeName, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -634,7 +807,7 @@ func TestStateStore_EnsureNodeDeprecated(t *testing.T) { t.Fatalf("err: %v", err) } // Retrieve the node again - idx, out, err := s.GetNode(firstNodeName, nil) + idx, out, err := s.GetNode(firstNodeName, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -657,7 +830,7 @@ func TestStateStore_EnsureNodeDeprecated(t *testing.T) { t.Fatalf("err: %v", err) } // Retrieve the node again - idx, out, err = s.GetNode(firstNodeName, nil) + idx, out, err = s.GetNode(firstNodeName, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -730,11 +903,11 @@ func TestNodeRenamingNodes(t *testing.T) { t.Fatalf("err: %s", err) } - if _, node, err := s.GetNodeID(nodeID1, nil); err != nil || node == nil || node.ID != nodeID1 { + if _, node, err := s.GetNodeID(nodeID1, nil, ""); err != nil || node == nil || node.ID != nodeID1 { t.Fatalf("err: %s, node:= %q", err, node) } - if _, node, err := s.GetNodeID(nodeID2, nil); err != nil && node == nil || node.ID != nodeID2 { + if _, node, err := s.GetNodeID(nodeID2, nil, ""); err != nil && node == nil || node.ID != nodeID2 { t.Fatalf("err: %s", err) } @@ -779,13 +952,13 @@ func TestNodeRenamingNodes(t *testing.T) { } // Retrieve the node again - idx, out, err := s.GetNode("node2bis", nil) + idx, out, err := s.GetNode("node2bis", nil, "") if err != nil { t.Fatalf("err: %s", err) } // Retrieve the node again - idx2, out2, err := s.GetNodeID(nodeID2, nil) + idx2, out2, err := s.GetNodeID(nodeID2, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -803,7 +976,7 @@ func TestStateStore_EnsureNode(t *testing.T) { s := testStateStore(t) // Fetching a non-existent node returns nil - if _, node, err := s.GetNode("node1", nil); node != nil || err != nil { + if _, node, err := s.GetNode("node1", nil, ""); node != nil || err != nil { t.Fatalf("expected (nil, nil), got: (%#v, %#v)", node, err) } @@ -820,7 +993,7 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node again - idx, out, err := s.GetNode("node1", nil) + idx, out, err := s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -849,7 +1022,7 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node - idx, out, err = s.GetNode("node1", nil) + idx, out, err = s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -866,7 +1039,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(3, in2); err != nil { t.Fatalf("err: %s", err) } - _, out, err = s.GetNode("node1", nil) + _, out, err = s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -883,7 +1056,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(3, in3); err != nil { t.Fatalf("err: %s", err) } - idx, out, err = s.GetNode("node1", nil) + idx, out, err = s.GetNode("node1", nil, "") if err != nil { 
t.Fatalf("err: %s", err) } @@ -910,13 +1083,13 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node - _, out, err = s.GetNode("node1", nil) + _, out, err = s.GetNode("node1", nil, "") require.NoError(t, err) if out != nil { t.Fatalf("Node should not exist anymore: %q", out) } - idx, out, err = s.GetNode("node1-renamed", nil) + idx, out, err = s.GetNode("node1-renamed", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -975,7 +1148,7 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node - _, out, err = s.GetNode("Node1bis", nil) + _, out, err = s.GetNode("Node1bis", nil, "") require.NoError(t, err) if out == nil { t.Fatalf("Node should exist, but was null") @@ -991,7 +1164,7 @@ func TestStateStore_EnsureNode(t *testing.T) { t.Fatalf("err: %s", err) } - idx, out, err = s.GetNode("Node1bis", nil) + idx, out, err = s.GetNode("Node1bis", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1034,7 +1207,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(12, in); err != nil { t.Fatalf("err: %s", err) } - idx, out, err = s.GetNode("Node1-Renamed2", nil) + idx, out, err = s.GetNode("Node1-Renamed2", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1064,7 +1237,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(15, in); err != nil { t.Fatalf("[DEPRECATED] it should work, err:= %q", err) } - _, out, err = s.GetNode("Node1-Renamed2", nil) + _, out, err = s.GetNode("Node1-Renamed2", nil, "") if err != nil { t.Fatalf("[DEPRECATED] err: %s", err) } @@ -1081,7 +1254,7 @@ func TestStateStore_GetNodes(t *testing.T) { // Listing with no results returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.Nodes(ws, nil) + idx, res, err := s.Nodes(ws, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1096,7 +1269,7 @@ func TestStateStore_GetNodes(t *testing.T) { // Retrieve the nodes. ws = memdb.NewWatchSet() - idx, nodes, err := s.Nodes(ws, nil) + idx, nodes, err := s.Nodes(ws, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1126,7 +1299,7 @@ func TestStateStore_GetNodes(t *testing.T) { if watchFired(ws) { t.Fatalf("bad") } - if err := s.DeleteNode(3, "node1", nil); err != nil { + if err := s.DeleteNode(3, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -1146,7 +1319,7 @@ func BenchmarkGetNodes(b *testing.B) { ws := memdb.NewWatchSet() for i := 0; i < b.N; i++ { - s.Nodes(ws, nil) + s.Nodes(ws, nil, "") } } @@ -1155,7 +1328,7 @@ func TestStateStore_GetNodesByMeta(t *testing.T) { // Listing with no results returns nil ws := memdb.NewWatchSet() - idx, res, err := s.NodesByMeta(ws, map[string]string{"somekey": "somevalue"}, nil) + idx, res, err := s.NodesByMeta(ws, map[string]string{"somekey": "somevalue"}, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1200,7 +1373,7 @@ func TestStateStore_GetNodesByMeta(t *testing.T) { } for _, tc := range cases { - _, result, err := s.NodesByMeta(nil, tc.filters, nil) + _, result, err := s.NodesByMeta(nil, tc.filters, nil, "") if err != nil { t.Fatalf("bad: %v", err) } @@ -1218,7 +1391,7 @@ func TestStateStore_GetNodesByMeta(t *testing.T) { // Set up a watch. 
ws = memdb.NewWatchSet() - _, _, err = s.NodesByMeta(ws, map[string]string{"role": "client"}, nil) + _, _, err = s.NodesByMeta(ws, map[string]string{"role": "client"}, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -1260,13 +1433,13 @@ func TestStateStore_NodeServices(t *testing.T) { // Look up by name. t.Run("Look up by name", func(t *testing.T) { { - _, ns, err := s.NodeServices(nil, "node1", nil) + _, ns, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node1", ns.Node.Node) } { - _, ns, err := s.NodeServices(nil, "node2", nil) + _, ns, err := s.NodeServices(nil, "node2", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node2", ns.Node.Node) @@ -1275,13 +1448,13 @@ func TestStateStore_NodeServices(t *testing.T) { t.Run("Look up by UUID", func(t *testing.T) { { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node1", ns.Node.Node) } { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node2", ns.Node.Node) @@ -1289,20 +1462,20 @@ func TestStateStore_NodeServices(t *testing.T) { }) t.Run("Ambiguous prefix", func(t *testing.T) { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510", nil, "") require.NoError(t, err) require.Nil(t, ns) }) t.Run("Bad node", func(t *testing.T) { // Bad node, and not a UUID (should not get a UUID error). - _, ns, err := s.NodeServices(nil, "nope", nil) + _, ns, err := s.NodeServices(nil, "nope", nil, "") require.NoError(t, err) require.Nil(t, ns) }) t.Run("Specific prefix", func(t *testing.T) { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node2", ns.Node.Node) @@ -1318,12 +1491,12 @@ func TestStateStore_DeleteNode(t *testing.T) { testRegisterCheck(t, s, 2, "node1", "", "check1", api.HealthPassing) // Delete the node - if err := s.DeleteNode(3, "node1", nil); err != nil { + if err := s.DeleteNode(3, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } // The node was removed - if idx, n, err := s.GetNode("node1", nil); err != nil || n != nil || idx != 3 { + if idx, n, err := s.GetNode("node1", nil, ""); err != nil || n != nil || idx != 3 { t.Fatalf("bad: %#v %d (err: %#v)", n, idx, err) } @@ -1357,7 +1530,7 @@ func TestStateStore_DeleteNode(t *testing.T) { // Deleting a nonexistent node should be idempotent and not return // an error - if err := s.DeleteNode(4, "node1", nil); err != nil { + if err := s.DeleteNode(4, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex(tableNodes); idx != 3 { @@ -1415,7 +1588,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Fetching services for a node with none returns nil. 
ws := memdb.NewWatchSet() - idx, res, err := s.NodeServices(ws, "node1", nil) + idx, res, err := s.NodeServices(ws, "node1", nil, "") if err != nil || res != nil || idx != 0 { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1448,7 +1621,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Service successfully registers into the state store. ws = memdb.NewWatchSet() - _, _, err = s.NodeServices(ws, "node1", nil) + _, _, err = s.NodeServices(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -1470,7 +1643,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Register a different service on the bad node. ws = memdb.NewWatchSet() - _, _, err = s.NodeServices(ws, "node1", nil) + _, _, err = s.NodeServices(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -1485,7 +1658,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Retrieve the services. ws = memdb.NewWatchSet() - idx, out, err := s.NodeServices(ws, "node1", nil) + idx, out, err := s.NodeServices(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1526,7 +1699,7 @@ func TestStateStore_EnsureService(t *testing.T) { } // Retrieve the service again and ensure it matches.. - idx, out, err = s.NodeServices(nil, "node1", nil) + idx, out, err = s.NodeServices(nil, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1571,7 +1744,7 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) { assert.Nil(t, s.EnsureService(10, "node1", ns1)) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1", nil) + _, out, err := s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) assert.Len(t, out.Services, 1) @@ -1610,7 +1783,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.1", vip) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1", nil) + _, out, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) assert.NotNil(t, out) assert.Len(t, out.Services, 1) @@ -1641,7 +1814,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) assert.Len(t, out.Services, 2) @@ -1651,7 +1824,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, ns2.Port, taggedAddress.Port) // Delete the first service and make sure it no longer has a virtual IP assigned. - require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta)) + require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta, "")) vip, err = s.VirtualIPForService(structs.ServiceName{Name: "connect-proxy"}) require.NoError(t, err) assert.Equal(t, "", vip) @@ -1678,7 +1851,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Make sure the new instance has the same virtual IP. 
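
Review note: the virtual-IP assertions in this test read the assigned address back through the instance's tagged addresses. A compact form of that read, using only names that appear in this diff (virtualIPOf itself is illustrative):

    // Read the connect virtual IP assigned to a service instance, as the
    // assertions above do via TaggedAddresses.
    func virtualIPOf(s *Store, node, serviceID string) (string, error) {
        _, out, err := s.NodeServices(nil, node, nil, "")
        if err != nil || out == nil {
            return "", err
        }
        svc, ok := out.Services[serviceID]
        if !ok || svc == nil {
            return "", nil
        }
        ta, ok := svc.TaggedAddresses[structs.TaggedAddressVirtualIP]
        if !ok {
            return "", nil
        }
        return ta.Address, nil // e.g. "240.0.0.1"
    }
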
- _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) taggedAddress = out.Services["redis-proxy2"].TaggedAddresses[structs.TaggedAddressVirtualIP] assert.Equal(t, vip, taggedAddress.Address) @@ -1706,7 +1879,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.1", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP] assert.Equal(t, vip, taggedAddress.Address) @@ -1742,7 +1915,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.1", vip) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1", nil) + _, out, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) assert.NotNil(t, out) @@ -1772,7 +1945,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) @@ -1781,7 +1954,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, ns2.Port, taggedAddress.Port) // Delete the last service and make sure it no longer has a virtual IP assigned. - require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta)) + require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta, "")) vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) require.NoError(t, err) assert.Equal(t, "", vip) @@ -1807,7 +1980,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) @@ -1837,7 +2010,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.3", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) @@ -1851,7 +2024,7 @@ func TestStateStore_Services(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, services, err := s.Services(ws, nil) + idx, services, err := s.Services(ws, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1892,7 +2065,7 @@ func TestStateStore_Services(t *testing.T) { // Pull all the services. ws = memdb.NewWatchSet() - idx, services, err = s.Services(ws, nil) + idx, services, err = s.Services(ws, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1915,7 +2088,7 @@ func TestStateStore_Services(t *testing.T) { } // Deleting a node with a service should fire the watch. 
- if err := s.DeleteNode(6, "node1", nil); err != nil { + if err := s.DeleteNode(6, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -1929,7 +2102,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { ws := memdb.NewWatchSet() t.Run("Listing with no results returns nil", func(t *testing.T) { - idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil) + idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil, "") if idx != 0 || len(res) != 0 || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1971,7 +2144,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { ws = memdb.NewWatchSet() t.Run("Filter the services by the first node's meta value", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1983,7 +2156,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Get all services using the common meta value", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1995,7 +2168,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Get an empty list for an invalid meta value", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2004,7 +2177,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2038,7 +2211,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // service table. ws := memdb.NewWatchSet() - _, _, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) + _, _, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil, "") require.NoError(t, err) testRegisterService(t, s, idx, "nope", "more-nope") @@ -2063,7 +2236,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ServiceNodes(ws, "db", nil) + idx, nodes, err := s.ServiceNodes(ws, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2102,7 +2275,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.ServiceNodes(ws, "db", nil) + idx, nodes, err = s.ServiceNodes(ws, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2165,7 +2338,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { } // But removing a node with the "db" service should fire the watch. 
- if err := s.DeleteNode(18, "bar", nil); err != nil { + if err := s.DeleteNode(18, "bar", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2189,7 +2362,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole nodes // table. ws = memdb.NewWatchSet() - _, _, err = s.ServiceNodes(ws, "db", nil) + _, _, err = s.ServiceNodes(ws, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2206,7 +2379,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"primary"}, nil) + idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"primary"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2239,7 +2412,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"primary"}, nil) + idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"primary"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2269,7 +2442,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { } // But removing a node with the "db:primary" service should fire the watch. - if err := s.DeleteNode(21, "foo", nil); err != nil { + if err := s.DeleteNode(21, "foo", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2300,7 +2473,7 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { t.Fatalf("err: %v", err) } - idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"primary"}, nil) + idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"primary"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 1) @@ -2309,13 +2482,13 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { require.Contains(t, nodes[0].ServiceTags, "primary") require.Equal(t, nodes[0].ServicePort, 8000) - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}, nil) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 3) // Test filtering on multiple tags - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "replica"}, nil) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "replica"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 2) @@ -2324,7 +2497,7 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { require.Contains(t, nodes[1].ServiceTags, "v2") require.Contains(t, nodes[1].ServiceTags, "replica") - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}, nil) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 1) @@ -2344,9 +2517,9 @@ func TestStateStore_DeleteService(t *testing.T) { // Delete the service. ws := memdb.NewWatchSet() - _, _, err := s.NodeServices(ws, "node1", nil) + _, _, err := s.NodeServices(ws, "node1", nil, "") require.NoError(t, err) - if err := s.DeleteService(4, "node1", "service1", nil); err != nil { + if err := s.DeleteService(4, "node1", "service1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2355,7 +2528,7 @@ func TestStateStore_DeleteService(t *testing.T) { // Service doesn't exist. 
ws = memdb.NewWatchSet() - _, ns, err := s.NodeServices(ws, "node1", nil) + _, ns, err := s.NodeServices(ws, "node1", nil, "") if err != nil || ns == nil || len(ns.Services) != 0 { t.Fatalf("bad: %#v (err: %#v)", ns, err) } @@ -2379,7 +2552,7 @@ func TestStateStore_DeleteService(t *testing.T) { // Deleting a nonexistent service should be idempotent and not return an // error, nor fire a watch. - if err := s.DeleteService(5, "node1", "service1", nil); err != nil { + if err := s.DeleteService(5, "node1", "service1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex(tableServices); idx != 4 { @@ -2395,7 +2568,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -2413,7 +2586,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(17)) assert.Len(t, nodes, 3) @@ -2429,7 +2602,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { assert.False(t, watchFired(ws)) // But removing a node with the "db" service should fire the watch. - assert.Nil(t, s.DeleteNode(18, "bar", nil)) + assert.Nil(t, s.DeleteNode(18, "bar", nil, "")) assert.True(t, watchFired(ws)) } @@ -2438,7 +2611,7 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -2459,7 +2632,7 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Reset WatchSet to ensure watch fires when associating db with gateway ws = memdb.NewWatchSet() - _, _, err = s.ConnectServiceNodes(ws, "db", nil) + _, _, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) // Associate gateway with db @@ -2477,7 +2650,7 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(17)) assert.Len(t, nodes, 2) @@ -2503,15 +2676,15 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Reset WatchSet to ensure watch fires when deregistering gateway ws = memdb.NewWatchSet() - _, _, err = s.ConnectServiceNodes(ws, "db", nil) + _, _, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) // Watch should fire when a gateway instance is deregistered - assert.Nil(t, s.DeleteService(19, "bar", "gateway", nil)) + assert.Nil(t, s.DeleteService(19, "bar", "gateway", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(19)) assert.Len(t, nodes, 2) @@ -2524,10 +2697,10 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { assert.Equal(t, 443, nodes[1].ServicePort) // Index should not slide back after deleting all instances of the gateway - assert.Nil(t, s.DeleteService(20, "foo", "gateway-2", nil)) + assert.Nil(t, s.DeleteService(20, "foo", "gateway-2", nil, "")) assert.True(t, watchFired(ws)) - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(20)) assert.Len(t, nodes, 1) @@ -2587,7 +2760,7 @@ func TestStateStore_Service_Snapshot(t *testing.T) { if idx := snap.LastIndex(); idx != 4 { t.Fatalf("bad index: %d", idx) } - services, err := snap.Services("node1", nil) + services, err := snap.Services("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2644,7 +2817,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { } // Retrieve the check and make sure it matches - idx, checks, err := s.NodeChecks(nil, "node1", nil) + idx, checks, err := s.NodeChecks(nil, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2661,7 +2834,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { testCheckOutput := func(t *testing.T, expectedNodeIndex, expectedIndexForCheck uint64, outputTxt string) { t.Helper() // Check that we successfully updated - idx, checks, err = s.NodeChecks(nil, "node1", nil) + idx, checks, err = s.NodeChecks(nil, "node1", nil, "") require.NoError(t, err) require.Equal(t, expectedNodeIndex, idx, "bad raft index") @@ -2727,7 +2900,7 @@ func TestStateStore_EnsureCheck_defaultStatus(t *testing.T) { } // Get the check again - _, result, err := s.NodeChecks(nil, "node1", nil) + _, result, err := s.NodeChecks(nil, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2743,7 +2916,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Do an initial query for a node that doesn't exist. 
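
Review note: the health read paths updated in this hunk all take the peer name last, with "" scoping the query to the local catalog; both the per-node list and the single-check point lookup follow the same shape. Illustrative sketch (checkSummary and the "check1" ID are taken from these tests, the helper itself is not in the patch):

    // Summarize a node's checks using the peer-aware list and point lookups.
    func checkSummary(s *Store, node string) (int, bool, error) {
        _, checks, err := s.NodeChecks(nil, node, nil, "")
        if err != nil {
            return 0, false, err
        }
        _, one, err := s.NodeCheck(node, "check1", nil, "")
        return len(checks), one != nil, err
    }
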
ws := memdb.NewWatchSet() - idx, checks, err := s.NodeChecks(ws, "node1", nil) + idx, checks, err := s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2768,7 +2941,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Try querying for all checks associated with node1 ws = memdb.NewWatchSet() - idx, checks, err = s.NodeChecks(ws, "node1", nil) + idx, checks, err = s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2788,7 +2961,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Try querying for all checks associated with node2 ws = memdb.NewWatchSet() - idx, checks, err = s.NodeChecks(ws, "node2", nil) + idx, checks, err = s.NodeChecks(ws, "node2", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2811,7 +2984,7 @@ func TestStateStore_ServiceChecks(t *testing.T) { // Do an initial query for a service that doesn't exist. ws := memdb.NewWatchSet() - idx, checks, err := s.ServiceChecks(ws, "service1", nil) + idx, checks, err := s.ServiceChecks(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2836,7 +3009,7 @@ func TestStateStore_ServiceChecks(t *testing.T) { // Try querying for all checks associated with service1. ws = memdb.NewWatchSet() - idx, checks, err = s.ServiceChecks(ws, "service1", nil) + idx, checks, err = s.ServiceChecks(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2870,7 +3043,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { // Querying with no results returns nil. ws := memdb.NewWatchSet() - idx, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", nil, nil) + idx, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", nil, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2923,7 +3096,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { idx = 7 for _, tc := range cases { ws = memdb.NewWatchSet() - _, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", tc.filters, nil) + _, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", tc.filters, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2959,8 +3132,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // node table. ws = memdb.NewWatchSet() - _, _, err = s.ServiceChecksByNodeMeta(ws, "service1", - map[string]string{"common": "1"}, nil) + _, _, err = s.ServiceChecksByNodeMeta(ws, "service1", map[string]string{"common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2981,7 +3153,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // Querying with no results returns nil ws := memdb.NewWatchSet() - idx, res, err := s.ChecksInState(ws, api.HealthPassing, nil) + idx, res, err := s.ChecksInState(ws, api.HealthPassing, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -2997,7 +3169,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // Query the state store for passing checks. ws = memdb.NewWatchSet() - _, checks, err := s.ChecksInState(ws, api.HealthPassing, nil) + _, checks, err := s.ChecksInState(ws, api.HealthPassing, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3021,7 +3193,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // HealthAny just returns everything. 
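
Review note: api.HealthAny acts as the wildcard state in these queries, so it returns every check, while a concrete state like api.HealthPassing filters. The peer name threads through unchanged. Sketch (countChecks is illustrative):

    // Count all local checks versus only the passing ones.
    func countChecks(s *Store) (total, passing int, err error) {
        _, all, err := s.ChecksInState(nil, api.HealthAny, nil, "")
        if err != nil {
            return 0, 0, err
        }
        _, pass, err := s.ChecksInState(nil, api.HealthPassing, nil, "")
        return len(all), len(pass), err
    }
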
ws = memdb.NewWatchSet() - _, checks, err = s.ChecksInState(ws, api.HealthAny, nil) + _, checks, err = s.ChecksInState(ws, api.HealthAny, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3048,7 +3220,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // Querying with no results returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil, nil) + idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3116,7 +3288,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { for i, tc := range cases { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { ws = memdb.NewWatchSet() - _, checks, err := s.ChecksInStateByNodeMeta(ws, tc.state, tc.filters, nil) + _, checks, err := s.ChecksInStateByNodeMeta(ws, tc.state, tc.filters, nil, "") require.NoError(t, err) var foundIDs []string @@ -3148,8 +3320,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // node table. ws = memdb.NewWatchSet() - _, _, err = s.ChecksInStateByNodeMeta(ws, api.HealthPassing, - map[string]string{"common": "1"}, nil) + _, _, err = s.ChecksInStateByNodeMeta(ws, api.HealthPassing, map[string]string{"common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3171,7 +3342,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Make sure the check is there. ws := memdb.NewWatchSet() - _, checks, err := s.NodeChecks(ws, "node1", nil) + _, checks, err := s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3182,10 +3353,10 @@ func TestStateStore_DeleteCheck(t *testing.T) { ensureServiceVersion(t, s, ws, "service1", 2, 1) // Delete the check. - if err := s.DeleteCheck(3, "node1", "check1", nil); err != nil { + if err := s.DeleteCheck(3, "node1", "check1", nil, ""); err != nil { t.Fatalf("err: %s", err) } - if idx, check, err := s.NodeCheck("node1", "check1", nil); idx != 3 || err != nil || check != nil { + if idx, check, err := s.NodeCheck("node1", "check1", nil, ""); idx != 3 || err != nil || check != nil { t.Fatalf("Node check should have been deleted idx=%d, node=%v, err=%s", idx, check, err) } if idx := s.maxIndex(tableChecks); idx != 3 { @@ -3199,7 +3370,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Check is gone ws = memdb.NewWatchSet() - _, checks, err = s.NodeChecks(ws, "node1", nil) + _, checks, err = s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3214,7 +3385,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Deleting a nonexistent check should be idempotent and not return an // error. 
- if err := s.DeleteCheck(4, "node1", "check1", nil); err != nil { + if err := s.DeleteCheck(4, "node1", "check1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex(tableChecks); idx != 3 { @@ -3226,7 +3397,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { } func ensureServiceVersion(t *testing.T, s *Store, ws memdb.WatchSet, serviceID string, expectedIdx uint64, expectedSize int) { - idx, services, err := s.ServiceNodes(ws, serviceID, nil) + idx, services, err := s.ServiceNodes(ws, serviceID, nil, "") t.Helper() if err != nil { t.Fatalf("err: %s", err) @@ -3244,7 +3415,7 @@ func ensureIndexForService(t *testing.T, s *Store, serviceName string, expectedI t.Helper() tx := s.db.Txn(false) defer tx.Abort() - transaction, err := tx.First(tableIndex, "id", serviceIndexName(serviceName, nil)) + transaction, err := tx.First(tableIndex, "id", serviceIndexName(serviceName, nil, "")) if err == nil { if idx, ok := transaction.(*IndexEntry); ok { if expectedIndex != idx.Value { @@ -3266,7 +3437,7 @@ func TestStateStore_IndexIndependence(t *testing.T) { // Querying with no matches gives an empty response ws := memdb.NewWatchSet() - idx, res, err := s.CheckServiceNodes(ws, "service1", nil) + idx, res, err := s.CheckServiceNodes(ws, "service1", nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3323,13 +3494,13 @@ func TestStateStore_IndexIndependence(t *testing.T) { testRegisterCheck(t, s, 14, "node2", "service_shared", "check_service_shared", api.HealthPassing) ensureServiceVersion(t, s, ws, "service_shared", 14, 2) - s.DeleteCheck(15, "node2", types.CheckID("check_service_shared"), nil) + s.DeleteCheck(15, "node2", types.CheckID("check_service_shared"), nil, "") ensureServiceVersion(t, s, ws, "service_shared", 15, 2) ensureIndexForService(t, s, "service_shared", 15) - s.DeleteService(16, "node2", "service_shared", nil) + s.DeleteService(16, "node2", "service_shared", nil, "") ensureServiceVersion(t, s, ws, "service_shared", 16, 1) ensureIndexForService(t, s, "service_shared", 16) - s.DeleteService(17, "node1", "service_shared", nil) + s.DeleteService(17, "node1", "service_shared", nil, "") ensureServiceVersion(t, s, ws, "service_shared", 17, 0) testRegisterService(t, s, 18, "node1", "service_new") @@ -3388,7 +3559,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // The connect index and gateway-services iterators are watched wantBeforeWatchSetSize: 2, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(5, "node1", "test", nil)) + require.NoError(t, s.DeleteService(5, "node1", "test", nil, "")) }, // Note that the old implementation would unblock in this case since it // always watched the target service's index even though some updates @@ -3449,7 +3620,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node2", "test", nil)) + require.NoError(t, s.DeleteService(6, "node2", "test", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3469,7 +3640,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. 
wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node1", "test", nil)) + require.NoError(t, s.DeleteService(6, "node1", "test", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3526,7 +3697,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy", nil)) + require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3546,7 +3717,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy", nil)) + require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3710,7 +3881,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Run the query ws := memdb.NewWatchSet() - _, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil) + _, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil, "") require.NoError(t, err) require.Len(t, res, tt.wantBeforeResLen) require.Len(t, ws, tt.wantBeforeWatchSetSize) @@ -3729,7 +3900,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Re-query the same result. Should return the desired index and len ws = memdb.NewWatchSet() - idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil) + idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil, "") require.NoError(t, err) require.Len(t, res, tt.wantAfterResLen) require.Equal(t, tt.wantAfterIndex, idx) @@ -3743,7 +3914,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { // Querying with no matches gives an empty response ws := memdb.NewWatchSet() - idx, res, err := s.CheckServiceNodes(ws, "service1", nil) + idx, res, err := s.CheckServiceNodes(ws, "service1", nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3776,7 +3947,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { // with a specific service. ws = memdb.NewWatchSet() ensureServiceVersion(t, s, ws, "service1", 6, 1) - idx, results, err := s.CheckServiceNodes(ws, "service1", nil) + idx, results, err := s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3802,7 +3973,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, _, err = s.CheckServiceNodes(ws, "service1", nil) + idx, _, err = s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3818,7 +3989,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, _, err = s.CheckServiceNodes(ws, "service1", nil) + idx, _, err = s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3832,7 +4003,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, _, err = s.CheckServiceNodes(ws, "service1", nil) + idx, _, err = s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3859,7 +4030,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) { // Listing with no results returns an empty list. 
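
Review note: CheckConnectServiceNodes, like CheckServiceNodes, returns node/service/checks triples, which is why the assertions in this test reach through nodes[i].Service. Sketch of consuming a result set (dumpCheckServiceNodes is illustrative):

    // Each CheckServiceNode bundles the node, the service instance, and the
    // health checks that apply to it.
    func dumpCheckServiceNodes(s *Store, service string) error {
        _, results, err := s.CheckConnectServiceNodes(nil, service, nil, "")
        if err != nil {
            return err
        }
        for _, csn := range results {
            fmt.Printf("%s/%s port=%d checks=%d\n",
                csn.Node.Node, csn.Service.ID, csn.Service.Port, len(csn.Checks))
        }
        return nil
    }
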
ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -3884,7 +4055,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(20)) assert.Len(t, nodes, 2) @@ -3904,7 +4075,7 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -3938,7 +4109,7 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(18)) assert.Len(t, nodes, 0) @@ -3957,7 +4128,7 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(21)) assert.Len(t, nodes, 2) @@ -3982,17 +4153,17 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(22)) assert.Len(t, nodes, 3) // Watch should fire when a gateway instance is deregistered - assert.Nil(t, s.DeleteService(23, "bar", "gateway", nil)) + assert.Nil(t, s.DeleteService(23, "bar", "gateway", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(23)) assert.Len(t, nodes, 2) @@ -4005,10 +4176,10 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { assert.Equal(t, 443, nodes[1].Service.Port) // Index should not slide back after deleting all instances of the gateway - assert.Nil(t, s.DeleteService(24, "foo", "gateway-2", nil)) + assert.Nil(t, s.DeleteService(24, "foo", "gateway-2", nil, "")) assert.True(t, watchFired(ws)) - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(24)) assert.Len(t, nodes, 1) @@ -4052,7 +4223,7 @@ func BenchmarkCheckServiceNodes(b *testing.B) { ws := memdb.NewWatchSet() for i := 0; i < b.N; i++ { - s.CheckServiceNodes(ws, "db", nil) + s.CheckServiceNodes(ws, "db", nil, "") } } @@ -4086,7 +4257,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) { } ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"primary"}, nil) + idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"primary"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4165,7 +4336,7 @@ 
func TestStateStore_Check_Snapshot(t *testing.T) { if idx := snap.LastIndex(); idx != 5 { t.Fatalf("bad index: %d", idx) } - iter, err := snap.Checks("node1", nil) + iter, err := snap.Checks("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4421,7 +4592,7 @@ func TestStateStore_ServiceDump(t *testing.T) { { name: "delete a node", modFn: func(t *testing.T) { - s.DeleteNode(12, "node2", nil) + s.DeleteNode(12, "node2", nil, "") }, allFired: true, // fires due to "index" kindFired: true, // fires due to "index" @@ -4457,11 +4628,11 @@ func TestStateStore_ServiceDump(t *testing.T) { op := op require.True(t, t.Run(op.name, func(t *testing.T) { wsAll := memdb.NewWatchSet() - _, _, err := s.ServiceDump(wsAll, "", false, nil) + _, _, err := s.ServiceDump(wsAll, "", false, nil, "") require.NoError(t, err) wsKind := memdb.NewWatchSet() - _, _, err = s.ServiceDump(wsKind, structs.ServiceKindConnectProxy, true, nil) + _, _, err = s.ServiceDump(wsKind, structs.ServiceKindConnectProxy, true, nil, "") require.NoError(t, err) op.modFn(t) @@ -4469,12 +4640,12 @@ func TestStateStore_ServiceDump(t *testing.T) { require.Equal(t, op.allFired, watchFired(wsAll), "all dump watch firing busted") require.Equal(t, op.kindFired, watchFired(wsKind), "kind dump watch firing busted") - _, dump, err := s.ServiceDump(nil, "", false, nil) + _, dump, err := s.ServiceDump(nil, "", false, nil, "") require.NoError(t, err) sortDump(dump) op.checkAll(t, dump) - _, dump, err = s.ServiceDump(nil, structs.ServiceKindConnectProxy, true, nil) + _, dump, err = s.ServiceDump(nil, structs.ServiceKindConnectProxy, true, nil, "") require.NoError(t, err) sortDump(dump) op.checkKind(t, dump) @@ -4487,12 +4658,12 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { // Generating a node dump that matches nothing returns empty wsInfo := memdb.NewWatchSet() - idx, dump, err := s.NodeInfo(wsInfo, "node1", nil) + idx, dump, err := s.NodeInfo(wsInfo, "node1", nil, "") if idx != 0 || dump != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, dump, err) } wsDump := memdb.NewWatchSet() - idx, dump, err = s.NodeDump(wsDump, nil) + idx, dump, err = s.NodeDump(wsDump, nil, "") if idx != 0 || dump != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, dump, err) } @@ -4645,7 +4816,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { // Get a dump of just a single node ws := memdb.NewWatchSet() - idx, dump, err = s.NodeInfo(ws, "node1", nil) + idx, dump, err = s.NodeInfo(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4656,7 +4827,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { require.Equal(t, expect[0], dump[0]) // Generate a dump of all the nodes - idx, dump, err = s.NodeDump(nil, nil) + idx, dump, err = s.NodeDump(nil, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4687,7 +4858,7 @@ func TestStateStore_ServiceIdxUpdateOnNodeUpdate(t *testing.T) { // Store the current service index ws := memdb.NewWatchSet() - lastIdx, _, err := s.ServiceNodes(ws, "srv", nil) + lastIdx, _, err := s.ServiceNodes(ws, "srv", nil, "") require.Nil(t, err) // Update the node with some meta @@ -4696,7 +4867,7 @@ func TestStateStore_ServiceIdxUpdateOnNodeUpdate(t *testing.T) { // Read the new service index ws = memdb.NewWatchSet() - newIdx, _, err := s.ServiceNodes(ws, "srv", nil) + newIdx, _, err := s.ServiceNodes(ws, "srv", nil, "") require.Nil(t, err) require.True(t, newIdx > lastIdx) @@ -4727,7 +4898,7 @@ func TestStateStore_ensureServiceCASTxn(t 
*testing.T) { // ensure no update happened roTxn := s.db.Txn(false) - _, nsRead, err := s.NodeService("node1", "foo", nil) + _, nsRead, err := s.NodeService("node1", "foo", nil, "") require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(2), nsRead.ModifyIndex) @@ -4742,7 +4913,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure no update happened roTxn = s.db.Txn(false) - _, nsRead, err = s.NodeService("node1", "foo", nil) + _, nsRead, err = s.NodeService("node1", "foo", nil, "") require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(2), nsRead.ModifyIndex) @@ -4757,7 +4928,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure the update happened roTxn = s.db.Txn(false) - _, nsRead, err = s.NodeService("node1", "foo", nil) + _, nsRead, err = s.NodeService("node1", "foo", nil, "") require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(7), nsRead.ModifyIndex) @@ -5004,7 +5175,7 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) { assert.Equal(t, expect, out) // Delete a service covered by wildcard - assert.Nil(t, s.DeleteService(24, "bar", "redis", nil)) + assert.Nil(t, s.DeleteService(24, "bar", "redis", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -5229,7 +5400,7 @@ func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) { assert.Equal(t, expect, out) // Delete a service specified directly. - assert.Nil(t, s.DeleteService(20, "foo", "db", nil)) + assert.Nil(t, s.DeleteService(20, "foo", "db", nil, "")) // Only the watch for other-gateway should fire, since its association to db came from a wildcard assert.False(t, watchFired(ws)) @@ -5513,7 +5684,7 @@ func TestStateStore_GatewayServices_Ingress(t *testing.T) { }) t.Run("deregistering a service", func(t *testing.T) { - require.Nil(t, s.DeleteService(18, "node1", "service1", nil)) + require.Nil(t, s.DeleteService(18, "node1", "service1", nil, "")) require.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -6257,7 +6428,7 @@ func TestStateStore_DumpGatewayServices(t *testing.T) { // Delete a service covered by wildcard t.Run("delete-wc-service", func(t *testing.T) { - assert.Nil(t, s.DeleteService(23, "bar", "redis", nil)) + assert.Nil(t, s.DeleteService(23, "bar", "redis", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -6562,7 +6733,7 @@ func TestCatalog_catalogDownstreams_Watches(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Now delete the web-proxy service and the result should be empty - require.NoError(t, s.DeleteService(3, "foo", "web-proxy", defaultMeta)) + require.NoError(t, s.DeleteService(3, "foo", "web-proxy", defaultMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7013,7 +7184,7 @@ func TestCatalog_upstreamsFromRegistration_Watches(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Now delete the web-proxy service and the result should mirror the one of the remaining instance - require.NoError(t, s.DeleteService(4, "foo", "web-proxy", defaultMeta)) + require.NoError(t, s.DeleteService(4, "foo", "web-proxy", defaultMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7033,7 +7204,7 @@ func TestCatalog_upstreamsFromRegistration_Watches(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Now delete the last web-proxy instance and the mappings should be cleared - require.NoError(t, s.DeleteService(5, "foo", "web-proxy-2", defaultMeta)) + require.NoError(t, s.DeleteService(5, "foo", "web-proxy-2", 
defaultMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7110,7 +7281,7 @@ func TestCatalog_topologyCleanupPanic(t *testing.T) { assert.True(t, watchFired(ws)) // Now delete the node Foo, and this would panic because of the deletion within an iterator - require.NoError(t, s.DeleteNode(3, "foo", nil)) + require.NoError(t, s.DeleteNode(3, "foo", nil, "")) assert.True(t, watchFired(ws)) } @@ -7271,7 +7442,7 @@ func TestCatalog_upstreamsFromRegistration_Ingress(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Deleting a service covered by a wildcard should delete its mapping - require.NoError(t, s.DeleteService(6, "foo", svc.ID, &svc.EnterpriseMeta)) + require.NoError(t, s.DeleteService(6, "foo", svc.ID, &svc.EnterpriseMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7393,7 +7564,7 @@ func TestCatalog_cleanupGatewayWildcards_panic(t *testing.T) { require.NoError(t, s.EnsureService(5, "foo", &api2)) // Now delete the node "foo", and this would panic because of the deletion within an iterator - require.NoError(t, s.DeleteNode(6, "foo", nil)) + require.NoError(t, s.DeleteNode(6, "foo", nil, "")) } func TestCatalog_DownstreamsForService(t *testing.T) { @@ -7942,7 +8113,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { // Deregister an ingress gateway and the index should not slide back idx++ - require.NoError(t, s.DeleteService(idx, "node1", "new-ingress-gateway", entMeta)) + require.NoError(t, s.DeleteService(idx, "node1", "new-ingress-gateway", entMeta, "")) gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) require.NoError(t, err) @@ -7967,7 +8138,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { // Deregister the single typical service and the service name should also be dropped idx++ - require.NoError(t, s.DeleteService(idx, "node1", "web", entMeta)) + require.NoError(t, s.DeleteService(idx, "node1", "web", entMeta, "")) gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindTypical) require.NoError(t, err) diff --git a/agent/consul/state/connect_ca_events.go b/agent/consul/state/connect_ca_events.go index 7d559f695..36fe8ce35 100644 --- a/agent/consul/state/connect_ca_events.go +++ b/agent/consul/state/connect_ca_events.go @@ -4,6 +4,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" ) // EventTopicCARoots is the streaming topic to which events will be published @@ -29,6 +30,10 @@ func (e EventPayloadCARoots) HasReadPermission(authz acl.Authorizer) bool { return authz.ServiceWriteAny(&authzContext) == acl.Allow } +func (e EventPayloadCARoots) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("EventPayloadCARoots does not implement ToSubscriptionEvent") +} + // caRootsChangeEvents returns an event on EventTopicCARoots whenever the list // of active CA Roots changes. func caRootsChangeEvents(tx ReadTxn, changes Changes) ([]stream.Event, error) { diff --git a/agent/consul/state/coordinate_test.go b/agent/consul/state/coordinate_test.go index 3a28d199b..6b576a8b8 100644 --- a/agent/consul/state/coordinate_test.go +++ b/agent/consul/state/coordinate_test.go @@ -181,7 +181,7 @@ func TestStateStore_Coordinate_Cleanup(t *testing.T) { require.Equal(t, expected, coords) // Now delete the node. 
- require.NoError(t, s.DeleteNode(3, "node1", nil)) + require.NoError(t, s.DeleteNode(3, "node1", nil, "")) // Make sure the coordinate is gone. _, coords, err = s.Coordinate(nil, "node1", nil) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 2417f5741..821288f3b 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -997,8 +997,9 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, // TODO(tproxy): One remaining improvement is that this includes non-Connect services (typical services without a proxy) // Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy. // Maybe narrow serviceNamesOfKindTxn to services represented by proxies? (ingress, sidecar-proxy, terminating) - index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical) + wildcardMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, *wildcardMeta) if err != nil { return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) } @@ -1008,7 +1009,7 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, if downstreams { // Ingress gateways can only ever be downstreams, since mesh services don't dial them. - index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway) + index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway, *wildcardMeta) if err != nil { return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) } diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go new file mode 100644 index 000000000..b2a8b2c38 --- /dev/null +++ b/agent/consul/state/peering.go @@ -0,0 +1,486 @@ +package state + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +const ( + tablePeering = "peering" + tablePeeringTrustBundles = "peering-trust-bundles" +) + +func peeringTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tablePeering, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: indexerSingle{ + readIndex: readIndex(indexFromUUIDString), + writeIndex: writeIndex(indexIDFromPeering), + }, + }, + indexName: { + Name: indexName, + AllowMissing: false, + Unique: true, + Indexer: indexerSingleWithPrefix{ + readIndex: indexPeeringFromQuery, + writeIndex: indexFromPeering, + prefixIndex: prefixIndexFromQueryNoNamespace, + }, + }, + }, + } +} + +func peeringTrustBundlesTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tablePeeringTrustBundles, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: indexerSingle{ + readIndex: indexPeeringFromQuery, // same as peering table since we'll use the query.Value + writeIndex: indexFromPeeringTrustBundle, + }, + }, + }, + } +} + +func indexIDFromPeering(raw interface{}) ([]byte, error) { + p, ok := raw.(*pbpeering.Peering) + if !ok { + return nil, fmt.Errorf("unexpected type %T for pbpeering.Peering index", raw) + } + + if p.ID == "" { + return nil,
errMissingValueForIndex + } + + uuid, err := uuidStringToBytes(p.ID) + if err != nil { + return nil, err + } + var b indexBuilder + b.Raw(uuid) + return b.Bytes(), nil +} + +func (s *Store) PeeringReadByID(ws memdb.WatchSet, id string) (uint64, *pbpeering.Peering, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + peering, err := peeringReadByIDTxn(ws, tx, id) + if err != nil { + return 0, nil, fmt.Errorf("failed to read peering by id: %w", err) + } + if peering == nil { + // Return the table's index so the caller can watch it for changes if the peering doesn't exist + return maxIndexWatchTxn(tx, ws, tablePeering), nil, nil + } + + return peering.ModifyIndex, peering, nil +} + +func (s *Store) PeeringRead(ws memdb.WatchSet, q Query) (uint64, *pbpeering.Peering, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + watchCh, peeringRaw, err := tx.FirstWatch(tablePeering, indexName, q) + if err != nil { + return 0, nil, fmt.Errorf("failed peering lookup: %w", err) + } + + peering, ok := peeringRaw.(*pbpeering.Peering) + if peeringRaw != nil && !ok { + return 0, nil, fmt.Errorf("invalid type %T", peeringRaw) + } + ws.Add(watchCh) + + if peering == nil { + // Return the table's index so the caller can watch it for changes if the peering doesn't exist + return maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeering, q.PartitionOrDefault())), nil, nil + } + return peering.ModifyIndex, peering, nil +} + +func peeringReadByIDTxn(ws memdb.WatchSet, tx ReadTxn, id string) (*pbpeering.Peering, error) { + watchCh, peeringRaw, err := tx.FirstWatch(tablePeering, indexID, id) + if err != nil { + return nil, fmt.Errorf("failed peering lookup: %w", err) + } + ws.Add(watchCh) + + peering, ok := peeringRaw.(*pbpeering.Peering) + if peeringRaw != nil && !ok { + return nil, fmt.Errorf("invalid type %T", peeringRaw) + } + return peering, nil +} + +func (s *Store) PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + var ( + iter memdb.ResultIterator + err error + idx uint64 + ) + if entMeta.PartitionOrDefault() == structs.WildcardSpecifier { + iter, err = tx.Get(tablePeering, indexID) + idx = maxIndexWatchTxn(tx, ws, tablePeering) + } else { + iter, err = tx.Get(tablePeering, indexName+"_prefix", entMeta) + idx = maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeering, entMeta.PartitionOrDefault())) + } + if err != nil { + return 0, nil, fmt.Errorf("failed peering lookup: %v", err) + } + + var result []*pbpeering.Peering + for entry := iter.Next(); entry != nil; entry = iter.Next() { + result = append(result, entry.(*pbpeering.Peering)) + } + + return idx, result, nil +} + +func generatePeeringUUID(tx ReadTxn) (string, error) { + for { + uuid, err := uuid.GenerateUUID() + if err != nil { + return "", fmt.Errorf("failed to generate UUID: %w", err) + } + existing, err := peeringReadByIDTxn(nil, tx, uuid) + if err != nil { + return "", fmt.Errorf("failed to read peering: %w", err) + } + if existing == nil { + return uuid, nil + } + } +} + +func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + q := Query{ + Value: p.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(p.Partition), + } + existingRaw, err := tx.First(tablePeering, indexName, q) + if err != nil { + return fmt.Errorf("failed peering lookup: %w", err) + } + + existing, ok := existingRaw.(*pbpeering.Peering) + if existingRaw != nil && !ok { + return fmt.Errorf("invalid type %T", 
existingRaw) + } + + if existing != nil { + p.CreateIndex = existing.CreateIndex + p.ID = existing.ID + + } else { + // TODO(peering): consider keeping PeeringState enum elsewhere? + p.State = pbpeering.PeeringState_INITIAL + p.CreateIndex = idx + + p.ID, err = generatePeeringUUID(tx) + if err != nil { + return fmt.Errorf("failed to generate peering id: %w", err) + } + } + p.ModifyIndex = idx + + if err := tx.Insert(tablePeering, p); err != nil { + return fmt.Errorf("failed inserting peering: %w", err) + } + + if err := updatePeeringTableIndexes(tx, idx, p.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +// TODO(peering): replace with deferred deletion since this operation +// should involve cleanup of data associated with the peering. +func (s *Store) PeeringDelete(idx uint64, q Query) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + existing, err := tx.First(tablePeering, indexName, q) + if err != nil { + return fmt.Errorf("failed peering lookup: %v", err) + } + + if existing == nil { + return nil + } + + if err := tx.Delete(tablePeering, existing); err != nil { + return fmt.Errorf("failed deleting peering: %v", err) + } + + if err := updatePeeringTableIndexes(tx, idx, q.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +func (s *Store) PeeringTerminateByID(idx uint64, id string) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + existing, err := peeringReadByIDTxn(nil, tx, id) + if err != nil { + return fmt.Errorf("failed to read peering %q: %w", id, err) + } + if existing == nil { + return nil + } + + c := proto.Clone(existing) + clone, ok := c.(*pbpeering.Peering) + if !ok { + return fmt.Errorf("invalid type %T, expected *pbpeering.Peering", c) + } + + clone.State = pbpeering.PeeringState_TERMINATED + clone.ModifyIndex = idx + + if err := tx.Insert(tablePeering, clone); err != nil { + return fmt.Errorf("failed inserting peering: %w", err) + } + + if err := updatePeeringTableIndexes(tx, idx, clone.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +// ExportedServicesForPeer returns the list of typical and proxy services exported to a peer. +// TODO(peering): What to do about terminating gateways? Sometimes terminating gateways are the appropriate destination +// to dial for an upstream mesh service. However, that information is handled by observing the terminating gateway's +// config entry, which we wouldn't want to replicate. How would client peers know to route through terminating gateways +// when they're not dialing through a remote mesh gateway?
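// For example, given an exported-services config entry along these lines
// (names are illustrative):
//
//	entry := &structs.ExportedServicesConfigEntry{
//		Name: "default",
//		Services: []structs.ExportedService{{
//			Name:      "billing",
//			Consumers: []structs.ServiceConsumer{{PeerName: "my-peering"}},
//		}},
//	}
//
// a call to ExportedServicesForPeer with the ID of the "my-peering" peering
// returns the single name "billing", while a Name of "*" instead exports
// every typical and connect-proxy service in the namespace.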
+func (s *Store) ExportedServicesForPeer(ws memdb.WatchSet, peerID string) (uint64, []structs.ServiceName, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + peering, err := peeringReadByIDTxn(ws, tx, peerID) + if err != nil { + return 0, nil, fmt.Errorf("failed to read peering: %w", err) + } + if peering == nil { + return 0, nil, nil + } + + maxIdx := peering.ModifyIndex + + entMeta := structs.NodeEnterpriseMetaInPartition(peering.Partition) + idx, raw, err := configEntryTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), entMeta) + if err != nil { + return 0, nil, fmt.Errorf("failed to fetch exported-services config entry: %w", err) + } + if idx > maxIdx { + maxIdx = idx + } + if raw == nil { + return maxIdx, nil, nil + } + conf, ok := raw.(*structs.ExportedServicesConfigEntry) + if !ok { + return 0, nil, fmt.Errorf("expected type *structs.ExportedServicesConfigEntry, got %T", raw) + } + + set := make(map[structs.ServiceName]struct{}) + + for _, svc := range conf.Services { + svcMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace) + + sawPeer := false + for _, consumer := range svc.Consumers { + name := structs.NewServiceName(svc.Name, &svcMeta) + + if _, ok := set[name]; ok { + // Service was covered by a wildcard that was already accounted for + continue + } + if consumer.PeerName != peering.Name { + continue + } + sawPeer = true + + if svc.Name != structs.WildcardSpecifier { + set[name] = struct{}{} + } + } + + // If the target peer is a consumer, and all services in the namespace are exported, query those service names. + if sawPeer && svc.Name == structs.WildcardSpecifier { + var typicalServices []*KindServiceName + idx, typicalServices, err = serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, svcMeta) + if err != nil { + return 0, nil, fmt.Errorf("failed to get service names: %w", err) + } + if idx > maxIdx { + maxIdx = idx + } + for _, s := range typicalServices { + set[s.Service] = struct{}{} + } + + var proxyServices []*KindServiceName + idx, proxyServices, err = serviceNamesOfKindTxn(tx, ws, structs.ServiceKindConnectProxy, svcMeta) + if err != nil { + return 0, nil, fmt.Errorf("failed to get service names: %w", err) + } + if idx > maxIdx { + maxIdx = idx + } + for _, s := range proxyServices { + set[s.Service] = struct{}{} + } + } + } + + var resp []structs.ServiceName + for svc := range set { + resp = append(resp, svc) + } + return maxIdx, resp, nil +} + +func (s *Store) PeeringTrustBundleRead(ws memdb.WatchSet, q Query) (uint64, *pbpeering.PeeringTrustBundle, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + watchCh, ptbRaw, err := tx.FirstWatch(tablePeeringTrustBundles, indexID, q) + if err != nil { + return 0, nil, fmt.Errorf("failed peering trust bundle lookup: %w", err) + } + + ptb, ok := ptbRaw.(*pbpeering.PeeringTrustBundle) + if ptbRaw != nil && !ok { + return 0, nil, fmt.Errorf("invalid type %T", ptbRaw) + } + ws.Add(watchCh) + + if ptb == nil { + // Return the table's index so the caller can watch it for changes if the trust bundle doesn't exist + return maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeeringTrustBundles, q.PartitionOrDefault())), nil, nil + } + return ptb.ModifyIndex, ptb, nil +} + +// PeeringTrustBundleWrite writes ptb to the state store. If there is an existing trust bundle with the given peer name, +// it will be overwritten.
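// A minimal usage sketch (field values are illustrative):
//
//	err := s.PeeringTrustBundleWrite(idx, &pbpeering.PeeringTrustBundle{
//		TrustDomain: "foo.com",
//		PeerName:    "foo",
//		RootPEMs:    []string{"foo certificate bundle"},
//	})
//
// Writes are keyed on PeerName, so a second write for "foo" replaces the
// bundle while carrying the original CreateIndex forward.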
+func (s *Store) PeeringTrustBundleWrite(idx uint64, ptb *pbpeering.PeeringTrustBundle) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + q := Query{ + Value: ptb.PeerName, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(ptb.Partition), + } + existingRaw, err := tx.First(tablePeeringTrustBundles, indexID, q) + if err != nil { + return fmt.Errorf("failed peering trust bundle lookup: %w", err) + } + + existing, ok := existingRaw.(*pbpeering.PeeringTrustBundle) + if existingRaw != nil && !ok { + return fmt.Errorf("invalid type %T", existingRaw) + } + + if existing != nil { + ptb.CreateIndex = existing.CreateIndex + + } else { + ptb.CreateIndex = idx + } + + ptb.ModifyIndex = idx + + if err := tx.Insert(tablePeeringTrustBundles, ptb); err != nil { + return fmt.Errorf("failed inserting peering trust bundle: %w", err) + } + + if err := updatePeeringTrustBundlesTableIndexes(tx, idx, ptb.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +func (s *Store) PeeringTrustBundleDelete(idx uint64, q Query) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + existing, err := tx.First(tablePeeringTrustBundles, indexID, q) + if err != nil { + return fmt.Errorf("failed peering trust bundle lookup: %v", err) + } + + if existing == nil { + return nil + } + + if err := tx.Delete(tablePeeringTrustBundles, existing); err != nil { + return fmt.Errorf("failed deleting peering trust bundle: %v", err) + } + + if err := updatePeeringTrustBundlesTableIndexes(tx, idx, q.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +func (s *Snapshot) Peerings() (memdb.ResultIterator, error) { + return s.tx.Get(tablePeering, indexName) +} + +func (s *Snapshot) PeeringTrustBundles() (memdb.ResultIterator, error) { + return s.tx.Get(tablePeeringTrustBundles, indexID) +} + +func (r *Restore) Peering(p *pbpeering.Peering) error { + if err := r.tx.Insert(tablePeering, p); err != nil { + return fmt.Errorf("failed restoring peering: %w", err) + } + + if err := updatePeeringTableIndexes(r.tx, p.ModifyIndex, p.PartitionOrDefault()); err != nil { + return err + } + + return nil +} + +func (r *Restore) PeeringTrustBundle(ptb *pbpeering.PeeringTrustBundle) error { + if err := r.tx.Insert(tablePeeringTrustBundles, ptb); err != nil { + return fmt.Errorf("failed restoring peering trust bundle: %w", err) + } + + if err := updatePeeringTrustBundlesTableIndexes(r.tx, ptb.ModifyIndex, ptb.PartitionOrDefault()); err != nil { + return err + } + + return nil +} diff --git a/agent/consul/state/peering_oss.go b/agent/consul/state/peering_oss.go new file mode 100644 index 000000000..8229d78a6 --- /dev/null +++ b/agent/consul/state/peering_oss.go @@ -0,0 +1,66 @@ +//go:build !consulent +// +build !consulent + +package state + +import ( + "fmt" + "strings" + + "github.com/hashicorp/consul/proto/pbpeering" +) + +func indexPeeringFromQuery(raw interface{}) ([]byte, error) { + q, ok := raw.(Query) + if !ok { + return nil, fmt.Errorf("unexpected type %T for Query index", raw) + } + + var b indexBuilder + b.String(strings.ToLower(q.Value)) + return b.Bytes(), nil +} + +func indexFromPeering(raw interface{}) ([]byte, error) { + p, ok := raw.(*pbpeering.Peering) + if !ok { + return nil, fmt.Errorf("unexpected type %T for pbpeering.Peering index", raw) + } + + if p.Name == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(strings.ToLower(p.Name)) + return b.Bytes(), nil +} + +func indexFromPeeringTrustBundle(raw interface{}) ([]byte, error) { + ptb, ok := 
raw.(*pbpeering.PeeringTrustBundle) + if !ok { + return nil, fmt.Errorf("unexpected type %T for pbpeering.PeeringTrustBundle index", raw) + } + + if ptb.PeerName == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(strings.ToLower(ptb.PeerName)) + return b.Bytes(), nil +} + +func updatePeeringTableIndexes(tx WriteTxn, idx uint64, _ string) error { + if err := tx.Insert(tableIndex, &IndexEntry{Key: tablePeering, Value: idx}); err != nil { + return fmt.Errorf("failed updating table index: %w", err) + } + return nil +} + +func updatePeeringTrustBundlesTableIndexes(tx WriteTxn, idx uint64, _ string) error { + if err := tx.Insert(tableIndex, &IndexEntry{Key: tablePeeringTrustBundles, Value: idx}); err != nil { + return fmt.Errorf("failed updating table index: %w", err) + } + return nil +} diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go new file mode 100644 index 000000000..1fbe7af0c --- /dev/null +++ b/agent/consul/state/peering_test.go @@ -0,0 +1,811 @@ +package state + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +func insertTestPeerings(t *testing.T, s *Store) { + t.Helper() + + tx := s.db.WriteTxn(0) + defer tx.Abort() + + err := tx.Insert(tablePeering, &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }) + require.NoError(t, err) + + err = tx.Insert(tablePeering, &pbpeering.Peering{ + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "5ebcff30-5509-4858-8142-a8e580f1863f", + State: pbpeering.PeeringState_FAILING, + CreateIndex: 2, + ModifyIndex: 2, + }) + require.NoError(t, err) + + err = tx.Insert(tableIndex, &IndexEntry{ + Key: tablePeering, + Value: 2, + }) + require.NoError(t, err) + require.NoError(t, tx.Commit()) +} + +func insertTestPeeringTrustBundles(t *testing.T, s *Store) { + t.Helper() + + tx := s.db.WriteTxn(0) + defer tx.Abort() + + err := tx.Insert(tablePeeringTrustBundles, &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo.com", + PeerName: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + RootPEMs: []string{"foo certificate bundle"}, + CreateIndex: 1, + ModifyIndex: 1, + }) + require.NoError(t, err) + + err = tx.Insert(tablePeeringTrustBundles, &pbpeering.PeeringTrustBundle{ + TrustDomain: "bar.com", + PeerName: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + RootPEMs: []string{"bar certificate bundle"}, + CreateIndex: 2, + ModifyIndex: 2, + }) + require.NoError(t, err) + + err = tx.Insert(tableIndex, &IndexEntry{ + Key: tablePeeringTrustBundles, + Value: 2, + }) + require.NoError(t, err) + require.NoError(t, tx.Commit()) +} + +func TestStateStore_PeeringReadByID(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + type testcase struct { + name string + id string + expect *pbpeering.Peering + } + run := func(t *testing.T, tc testcase) { + _, peering, err := s.PeeringReadByID(nil, tc.id) + require.NoError(t, err) + require.Equal(t, tc.expect, peering) + } + tcs := []testcase{ + { + name: "get foo", + id: 
"9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + expect: &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + name: "get bar", + id: "5ebcff30-5509-4858-8142-a8e580f1863f", + expect: &pbpeering.Peering{ + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "5ebcff30-5509-4858-8142-a8e580f1863f", + State: pbpeering.PeeringState_FAILING, + CreateIndex: 2, + ModifyIndex: 2, + }, + }, + { + name: "get non-existent", + id: "05f54e2f-7813-4d4d-ba03-534554c88a18", + expect: nil, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStateStore_PeeringRead(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + type testcase struct { + name string + query Query + expect *pbpeering.Peering + } + run := func(t *testing.T, tc testcase) { + _, peering, err := s.PeeringRead(nil, tc.query) + require.NoError(t, err) + require.Equal(t, tc.expect, peering) + } + tcs := []testcase{ + { + name: "get foo", + query: Query{ + Value: "foo", + }, + expect: &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + name: "get non-existent baz", + query: Query{ + Value: "baz", + }, + expect: nil, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_Peering_Watch(t *testing.T) { + s := NewStateStore(nil) + + var lastIdx uint64 + lastIdx++ + + // set up initial write + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + }) + require.NoError(t, err) + + newWatch := func(t *testing.T, q Query) memdb.WatchSet { + t.Helper() + // set up a watch + ws := memdb.NewWatchSet() + + _, _, err := s.PeeringRead(ws, q) + require.NoError(t, err) + + return ws + } + + t.Run("insert fires watch", func(t *testing.T) { + // watch on non-existent bar + ws := newWatch(t, Query{Value: "bar"}) + + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "bar", + }) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + // should find bar peering + idx, p, err := s.PeeringRead(ws, Query{Value: "bar"}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.NotNil(t, p) + }) + + t.Run("update fires watch", func(t *testing.T) { + // watch on existing foo + ws := newWatch(t, Query{Value: "foo"}) + + // unrelated write shouldn't fire watch + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "bar", + }) + require.NoError(t, err) + require.False(t, watchFired(ws)) + + // foo write should fire watch + lastIdx++ + err = s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_FAILING, + }) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + // check foo is updated + idx, p, err := s.PeeringRead(ws, Query{Value: "foo"}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Equal(t, pbpeering.PeeringState_FAILING, p.State) + }) + + t.Run("delete fires watch", func(t *testing.T) { + // watch on existing foo + ws := newWatch(t, Query{Value: "foo"}) + + // delete on bar shouldn't fire watch + lastIdx++ + require.NoError(t, s.PeeringWrite(lastIdx, 
&pbpeering.Peering{Name: "bar"})) + lastIdx++ + require.NoError(t, s.PeeringDelete(lastIdx, Query{Value: "bar"})) + require.False(t, watchFired(ws)) + + // delete on foo should fire watch + lastIdx++ + err := s.PeeringDelete(lastIdx, Query{Value: "foo"}) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + // check foo is gone + idx, p, err := s.PeeringRead(ws, Query{Value: "foo"}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Nil(t, p) + }) +} + +func TestStore_PeeringList(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + _, pps, err := s.PeeringList(nil, acl.EnterpriseMeta{}) + require.NoError(t, err) + expect := []*pbpeering.Peering{ + { + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }, + { + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "5ebcff30-5509-4858-8142-a8e580f1863f", + State: pbpeering.PeeringState_FAILING, + CreateIndex: 2, + ModifyIndex: 2, + }, + } + require.ElementsMatch(t, expect, pps) +} + +func TestStore_PeeringList_Watch(t *testing.T) { + s := NewStateStore(nil) + + var lastIdx uint64 + lastIdx++ // start at 1 + + // track number of expected peerings in state store + var count int + + newWatch := func(t *testing.T, entMeta acl.EnterpriseMeta) memdb.WatchSet { + t.Helper() + // set up a watch + ws := memdb.NewWatchSet() + + _, _, err := s.PeeringList(ws, entMeta) + require.NoError(t, err) + + return ws + } + + t.Run("insert fires watch", func(t *testing.T) { + ws := newWatch(t, acl.EnterpriseMeta{}) + + lastIdx++ + // insert a peering + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + count++ + + require.True(t, watchFired(ws)) + + // should find bar peering + idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Len(t, pp, count) + }) + + t.Run("update fires watch", func(t *testing.T) { + // set up initial write + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + count++ + + ws := newWatch(t, acl.EnterpriseMeta{}) + + // update peering + lastIdx++ + err = s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_FAILING, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + + require.True(t, watchFired(ws)) + + idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Len(t, pp, count) + }) + + t.Run("delete fires watch", func(t *testing.T) { + // set up initial write + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "baz", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + count++ + + ws := newWatch(t, acl.EnterpriseMeta{}) + + // delete peering + lastIdx++ + err = s.PeeringDelete(lastIdx, Query{Value: "baz"}) + require.NoError(t, err) + count-- + + require.True(t, watchFired(ws)) + + idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Len(t, 
pp, count) + }) +} + +func TestStore_PeeringWrite(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + type testcase struct { + name string + input *pbpeering.Peering + } + run := func(t *testing.T, tc testcase) { + require.NoError(t, s.PeeringWrite(10, tc.input)) + + q := Query{ + Value: tc.input.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(tc.input.Partition), + } + _, p, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.NotNil(t, p) + if tc.input.State == 0 { + require.Equal(t, pbpeering.PeeringState_INITIAL, p.State) + } + require.Equal(t, tc.input.Name, p.Name) + } + tcs := []testcase{ + { + name: "create baz", + input: &pbpeering.Peering{ + Name: "baz", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + { + name: "update foo", + input: &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_FAILING, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_PeeringWrite_GenerateUUID(t *testing.T) { + rand.Seed(1) + + s := NewStateStore(nil) + + entMeta := structs.NodeEnterpriseMetaInDefaultPartition() + partition := entMeta.PartitionOrDefault() + + for i := 1; i < 11; i++ { + require.NoError(t, s.PeeringWrite(uint64(i), &pbpeering.Peering{ + Name: fmt.Sprintf("peering-%d", i), + Partition: partition, + })) + } + + idx, peerings, err := s.PeeringList(nil, *entMeta) + require.NoError(t, err) + require.Equal(t, uint64(10), idx) + require.Len(t, peerings, 10) + + // Ensure that all assigned UUIDs are unique. + uniq := make(map[string]struct{}) + for _, p := range peerings { + uniq[p.ID] = struct{}{} + } + require.Len(t, uniq, 10) + + // Ensure that the ID of an existing peering cannot be overwritten. + updated := &pbpeering.Peering{ + Name: peerings[0].Name, + Partition: peerings[0].Partition, + } + + // Attempt to overwrite ID. 
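// PeeringWrite treats the stored ID as authoritative on update, so the
// generated UUID below should be discarded and the read that follows should
// still return peerings[0].ID.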
+ updated.ID, err = uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(11, updated)) + + q := Query{ + Value: updated.Name, + EnterpriseMeta: *entMeta, + } + idx, got, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.Equal(t, uint64(11), idx) + require.Equal(t, peerings[0].ID, got.ID) +} + +func TestStore_PeeringDelete(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + q := Query{Value: "foo"} + + require.NoError(t, s.PeeringDelete(10, q)) + + _, p, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.Nil(t, p) +} + +func TestStore_PeeringTerminateByID(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + // id corresponding to default/foo + id := "9e650110-ac74-4c5a-a6a8-9348b2bed4e9" + + require.NoError(t, s.PeeringTerminateByID(10, id)) + + _, p, err := s.PeeringReadByID(nil, id) + require.NoError(t, err) + require.Equal(t, pbpeering.PeeringState_TERMINATED, p.State) +} + +func TestStateStore_PeeringTrustBundleRead(t *testing.T) { + s := NewStateStore(nil) + insertTestPeeringTrustBundles(t, s) + + type testcase struct { + name string + query Query + expect *pbpeering.PeeringTrustBundle + } + run := func(t *testing.T, tc testcase) { + _, ptb, err := s.PeeringTrustBundleRead(nil, tc.query) + require.NoError(t, err) + require.Equal(t, tc.expect, ptb) + } + + entMeta := structs.NodeEnterpriseMetaInDefaultPartition() + + tcs := []testcase{ + { + name: "get foo", + query: Query{ + Value: "foo", + EnterpriseMeta: *entMeta, + }, + expect: &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo.com", + PeerName: "foo", + Partition: entMeta.PartitionOrEmpty(), + RootPEMs: []string{"foo certificate bundle"}, + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + name: "get non-existent baz", + query: Query{ + Value: "baz", + }, + expect: nil, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_PeeringTrustBundleWrite(t *testing.T) { + s := NewStateStore(nil) + insertTestPeeringTrustBundles(t, s) + type testcase struct { + name string + input *pbpeering.PeeringTrustBundle + } + run := func(t *testing.T, tc testcase) { + require.NoError(t, s.PeeringTrustBundleWrite(10, tc.input)) + + q := Query{ + Value: tc.input.PeerName, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(tc.input.Partition), + } + _, ptb, err := s.PeeringTrustBundleRead(nil, q) + require.NoError(t, err) + require.NotNil(t, ptb) + require.Equal(t, tc.input.TrustDomain, ptb.TrustDomain) + require.Equal(t, tc.input.PeerName, ptb.PeerName) + } + tcs := []testcase{ + { + name: "create baz", + input: &pbpeering.PeeringTrustBundle{ + TrustDomain: "baz.com", + PeerName: "baz", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + { + name: "update foo", + input: &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo-updated.com", + PeerName: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_PeeringTrustBundleDelete(t *testing.T) { + s := NewStateStore(nil) + insertTestPeeringTrustBundles(t, s) + + q := Query{Value: "foo"} + + require.NoError(t, s.PeeringTrustBundleDelete(10, q)) + + _, ptb, err := s.PeeringTrustBundleRead(nil, q) + require.NoError(t, err) + require.Nil(t, ptb) +} + +func TestStateStore_ExportedServicesForPeer(t *testing.T) { + s := NewStateStore(nil) + + var lastIdx uint64 + + 
lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + q := Query{Value: "my-peering"} + _, p, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.NotNil(t, p) + + id := p.ID + + ws := memdb.NewWatchSet() + + runStep(t, "no exported services", func(t *testing.T) { + idx, exported, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Empty(t, exported) + }) + + runStep(t, "config entry with exact service names", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "redis", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-other-peering", + }, + }, + }, + }, + } + lastIdx++ + err = s.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "mysql", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "redis", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.ElementsMatch(t, expect, got) + }) + + runStep(t, "config entry with wildcard service name picks up existing service", func(t *testing.T) { + lastIdx++ + require.NoError(t, s.EnsureNode(lastIdx, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + + lastIdx++ + require.NoError(t, s.EnsureService(lastIdx, "foo", &structs.NodeService{ID: "billing", Service: "billing", Port: 5000})) + + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "*", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = s.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "billing", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Equal(t, expect, got) + }) + + runStep(t, "config entry with wildcard service names picks up new registrations", func(t *testing.T) { + lastIdx++ + require.NoError(t, s.EnsureService(lastIdx, "foo", &structs.NodeService{ID: "payments", Service: "payments", Port: 5000})) + + lastIdx++ + proxy := structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "payments-proxy", + Service: "payments-proxy", + Port: 5000, + } + require.NoError(t, s.EnsureService(lastIdx, "foo", &proxy)) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "billing", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "payments", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "payments-proxy", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + 
require.ElementsMatch(t, expect, got) + }) + + runStep(t, "config entry with wildcard service names picks up service deletions", func(t *testing.T) { + lastIdx++ + require.NoError(t, s.DeleteService(lastIdx, "foo", "billing", nil, "")) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "payments", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "payments-proxy", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.ElementsMatch(t, expect, got) + }) + + runStep(t, "deleting the config entry clears exported services", func(t *testing.T) { + require.NoError(t, s.DeleteConfigEntry(lastIdx, structs.ExportedServices, "default", structs.DefaultEnterpriseMetaInDefaultPartition())) + idx, exported, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Empty(t, exported) + }) +} diff --git a/agent/consul/state/query.go b/agent/consul/state/query.go index b88fbe4fc..a4725b875 100644 --- a/agent/consul/state/query.go +++ b/agent/consul/state/query.go @@ -12,10 +12,15 @@ import ( // Query is a type used to query any single value index that may include an // enterprise identifier. type Query struct { - Value string + Value string + PeerName string acl.EnterpriseMeta } +func (q Query) PeerOrEmpty() string { + return q.PeerName +} + func (q Query) IDValue() string { return q.Value } @@ -137,11 +142,16 @@ func (q BoolQuery) PartitionOrDefault() string { // KeyValueQuery is a type used to query for both a key and a value that may // include an enterprise identifier. type KeyValueQuery struct { - Key string - Value string + Key string + Value string + PeerName string acl.EnterpriseMeta } +func (q KeyValueQuery) PeerOrEmpty() string { + return q.PeerName +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. 
func (q KeyValueQuery) NamespaceOrDefault() string { diff --git a/agent/consul/state/query_oss.go b/agent/consul/state/query_oss.go index 0f11dce5f..553e7aebe 100644 --- a/agent/consul/state/query_oss.go +++ b/agent/consul/state/query_oss.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" ) func prefixIndexFromQuery(arg interface{}) ([]byte, error) { @@ -28,6 +29,29 @@ func prefixIndexFromQuery(arg interface{}) ([]byte, error) { return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) } +func prefixIndexFromQueryWithPeer(arg interface{}) ([]byte, error) { + var b indexBuilder + switch v := arg.(type) { + case *acl.EnterpriseMeta: + return nil, nil + case acl.EnterpriseMeta: + return nil, nil + case Query: + if v.PeerOrEmpty() == "" { + b.String(structs.LocalPeerKeyword) + } else { + b.String(strings.ToLower(v.PeerOrEmpty())) + } + if v.Value == "" { + return b.Bytes(), nil + } + b.String(strings.ToLower(v.Value)) + return b.Bytes(), nil + } + + return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) +} + func prefixIndexFromQueryNoNamespace(arg interface{}) ([]byte, error) { return prefixIndexFromQuery(arg) } diff --git a/agent/consul/state/schema.go b/agent/consul/state/schema.go index 75a2ffa74..28a690e48 100644 --- a/agent/consul/state/schema.go +++ b/agent/consul/state/schema.go @@ -22,12 +22,16 @@ func newDBSchema() *memdb.DBSchema { configTableSchema, coordinatesTableSchema, federationStateTableSchema, + freeVirtualIPTableSchema, gatewayServicesTableSchema, indexTableSchema, intentionsTableSchema, + kindServiceNameTableSchema, kvsTableSchema, meshTopologyTableSchema, nodesTableSchema, + peeringTableSchema, + peeringTrustBundlesTableSchema, policiesTableSchema, preparedQueriesTableSchema, rolesTableSchema, @@ -39,8 +43,6 @@ func newDBSchema() *memdb.DBSchema { tokensTableSchema, tombstonesTableSchema, usageTableSchema, - freeVirtualIPTableSchema, - kindServiceNameTableSchema, ) withEnterpriseSchema(db) return db diff --git a/agent/consul/state/schema_oss.go b/agent/consul/state/schema_oss.go index ea8e8a43e..fbe3cd7e5 100644 --- a/agent/consul/state/schema_oss.go +++ b/agent/consul/state/schema_oss.go @@ -3,7 +3,12 @@ package state -import "github.com/hashicorp/consul/acl" +import ( + "fmt" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) func partitionedIndexEntryName(entry string, _ string) string { return entry @@ -12,3 +17,11 @@ func partitionedIndexEntryName(entry string, _ string) string { func partitionedAndNamespacedIndexEntryName(entry string, _ *acl.EnterpriseMeta) string { return entry } + +// peeredIndexEntryName returns the peered index key for an importable entity (e.g. checks, services, or nodes). 
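// For example (assuming structs.LocalPeerKeyword is "internal"):
//
//	peeredIndexEntryName("services", "")    // "peer.internal:services"
//	peeredIndexEntryName("services", "foo") // "peer.foo:services"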
+func peeredIndexEntryName(entry, peerName string) string { + if peerName == "" { + peerName = structs.LocalPeerKeyword + } + return fmt.Sprintf("peer.%s:%s", peerName, entry) +} diff --git a/agent/consul/state/session_test.go b/agent/consul/state/session_test.go index 2e841500a..a4eae8a50 100644 --- a/agent/consul/state/session_test.go +++ b/agent/consul/state/session_test.go @@ -553,7 +553,7 @@ func TestStateStore_Session_Invalidate_DeleteNode(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteNode(15, "foo", nil); err != nil { + if err := s.DeleteNode(15, "foo", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -608,7 +608,7 @@ func TestStateStore_Session_Invalidate_DeleteService(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteService(15, "foo", "api", nil); err != nil { + if err := s.DeleteService(15, "foo", "api", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -709,7 +709,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteCheck(15, "foo", "bar", nil); err != nil { + if err := s.DeleteCheck(15, "foo", "bar", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -777,7 +777,7 @@ func TestStateStore_Session_Invalidate_Key_Unlock_Behavior(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteNode(6, "foo", nil); err != nil { + if err := s.DeleteNode(6, "foo", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -859,7 +859,7 @@ func TestStateStore_Session_Invalidate_Key_Delete_Behavior(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteNode(6, "foo", nil); err != nil { + if err := s.DeleteNode(6, "foo", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index e795b6857..d8aa98dd9 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -291,10 +291,9 @@ func maxIndexWatchTxn(tx ReadTxn, ws memdb.WatchSet, tables ...string) uint64 { return lindex } -// indexUpdateMaxTxn is used when restoring entries and sets the table's index to -// the given idx only if it's greater than the current index. -func indexUpdateMaxTxn(tx WriteTxn, idx uint64, table string) error { - ti, err := tx.First(tableIndex, indexID, table) +// indexUpdateMaxTxn sets the table's index to the given idx only if it's greater than the current index. 
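// For example, if the stored index for key is currently 10, then
// indexUpdateMaxTxn(tx, 8, key) leaves it at 10, while
// indexUpdateMaxTxn(tx, 12, key) raises it to 12. This keeps index updates
// correct even when restore entries are replayed out of order.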
+func indexUpdateMaxTxn(tx WriteTxn, idx uint64, key string) error { + ti, err := tx.First(tableIndex, indexID, key) if err != nil { return fmt.Errorf("failed to retrieve existing index: %s", err) } @@ -311,7 +310,7 @@ func indexUpdateMaxTxn(tx WriteTxn, idx uint64, table string) error { } } - if err := tx.Insert(tableIndex, &IndexEntry{table, idx}); err != nil { + if err := tx.Insert(tableIndex, &IndexEntry{key, idx}); err != nil { return fmt.Errorf("failed updating index %s", err) } return nil diff --git a/agent/consul/state/store_integration_test.go b/agent/consul/state/store_integration_test.go index c31b42ecc..47afc36d7 100644 --- a/agent/consul/state/store_integration_test.go +++ b/agent/consul/state/store_integration_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" ) func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { @@ -399,7 +400,7 @@ var topicService topic = "test-topic-service" func (s *Store) topicServiceTestHandler(req stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { key := req.Subject.String() - idx, nodes, err := s.ServiceNodes(nil, key, nil) + idx, nodes, err := s.ServiceNodes(nil, key, nil, structs.TODOPeerKeyword) if err != nil { return idx, err } @@ -434,6 +435,10 @@ func (p nodePayload) Subject() stream.Subject { return stream.StringSubject(p.key) } +func (p nodePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("nodePayload does not implement ToSubscriptionEvent") +} + func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLToken { token := &structs.ACLToken{ AccessorID: "3af117a9-2233-4cf4-8ff8-3c749c9906b4", diff --git a/agent/consul/state/txn.go b/agent/consul/state/txn.go index 4f44b56cc..5faccadfb 100644 --- a/agent/consul/state/txn.go +++ b/agent/consul/state/txn.go @@ -153,9 +153,9 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs getNode := func() (*structs.Node, error) { if op.Node.ID != "" { - return getNodeIDTxn(tx, op.Node.ID, op.Node.GetEnterpriseMeta()) + return getNodeIDTxn(tx, op.Node.ID, op.Node.GetEnterpriseMeta(), op.Node.PeerName) } else { - return getNodeTxn(tx, op.Node.Node, op.Node.GetEnterpriseMeta()) + return getNodeTxn(tx, op.Node.Node, op.Node.GetEnterpriseMeta(), op.Node.PeerName) } } @@ -182,11 +182,11 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs entry, err = getNode() case api.NodeDelete: - err = s.deleteNodeTxn(tx, idx, op.Node.Node, op.Node.GetEnterpriseMeta()) + err = s.deleteNodeTxn(tx, idx, op.Node.Node, op.Node.GetEnterpriseMeta(), op.Node.PeerName) case api.NodeDeleteCAS: var ok bool - ok, err = s.deleteNodeCASTxn(tx, idx, op.Node.ModifyIndex, op.Node.Node, op.Node.GetEnterpriseMeta()) + ok, err = s.deleteNodeCASTxn(tx, idx, op.Node.ModifyIndex, op.Node.Node, op.Node.GetEnterpriseMeta(), op.Node.PeerName) if !ok && err == nil { err = fmt.Errorf("failed to delete node %q, index is stale", op.Node.Node) } @@ -219,7 +219,7 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs func (s *Store) txnService(tx WriteTxn, idx uint64, op *structs.TxnServiceOp) (structs.TxnResults, error) { switch op.Verb { case api.ServiceGet: - entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, 
&op.Service.EnterpriseMeta, op.Service.PeerName) switch { case err != nil: return nil, err @@ -233,7 +233,7 @@ func (s *Store) txnService(tx WriteTxn, idx uint64, op *structs.TxnServiceOp) (s if err := ensureServiceTxn(tx, idx, op.Node, false, &op.Service); err != nil { return nil, err } - entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) return newTxnResultFromNodeServiceEntry(entry), err case api.ServiceCAS: @@ -246,15 +246,15 @@ func (s *Store) txnService(tx WriteTxn, idx uint64, op *structs.TxnServiceOp) (s return nil, err } - entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) return newTxnResultFromNodeServiceEntry(entry), err case api.ServiceDelete: - err := s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + err := s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) return nil, err case api.ServiceDeleteCAS: - ok, err := s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + ok, err := s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) if !ok && err == nil { return nil, fmt.Errorf("failed to delete service %q on node %q, index is stale", op.Service.ID, op.Node) } @@ -284,7 +284,7 @@ func (s *Store) txnCheck(tx WriteTxn, idx uint64, op *structs.TxnCheckOp) (struc switch op.Verb { case api.CheckGet: - _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) if entry == nil && err == nil { err = fmt.Errorf("check %q on node %q doesn't exist", op.Check.CheckID, op.Check.Node) } @@ -292,7 +292,7 @@ func (s *Store) txnCheck(tx WriteTxn, idx uint64, op *structs.TxnCheckOp) (struc case api.CheckSet: err = s.ensureCheckTxn(tx, idx, false, &op.Check) if err == nil { - _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) } case api.CheckCAS: @@ -303,14 +303,14 @@ func (s *Store) txnCheck(tx WriteTxn, idx uint64, op *structs.TxnCheckOp) (struc err = fmt.Errorf("failed to set check %q on node %q, index is stale", entry.CheckID, entry.Node) break } - _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) case api.CheckDelete: - err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) case api.CheckDeleteCAS: var ok bool - ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) if !ok && err == nil { err = fmt.Errorf("failed to delete check %q on node %q, index is stale", op.Check.CheckID, 
op.Check.Node) } diff --git a/agent/consul/state/txn_test.go b/agent/consul/state/txn_test.go index 17adc2bc3..f98325df3 100644 --- a/agent/consul/state/txn_test.go +++ b/agent/consul/state/txn_test.go @@ -196,7 +196,7 @@ func TestStateStore_Txn_Node(t *testing.T) { require.Equal(t, expected, results) // Pull the resulting state store contents. - idx, actual, err := s.Nodes(nil, nil) + idx, actual, err := s.Nodes(nil, nil, "") require.NoError(t, err) if idx != 8 { t.Fatalf("bad index: %d", idx) @@ -311,7 +311,7 @@ func TestStateStore_Txn_Service(t *testing.T) { require.Equal(t, expected, results) // Pull the resulting state store contents. - idx, actual, err := s.NodeServices(nil, "node1", nil) + idx, actual, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) if idx != 6 { t.Fatalf("bad index: %d", idx) @@ -464,7 +464,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { require.Equal(t, expected, results) // Pull the resulting state store contents. - idx, actual, err := s.NodeChecks(nil, "node1", nil) + idx, actual, err := s.NodeChecks(nil, "node1", nil, "") require.NoError(t, err) if idx != 6 { t.Fatalf("bad index: %d", idx) diff --git a/agent/consul/state/usage_test.go b/agent/consul/state/usage_test.go index 3831d9c76..7b0f11f8f 100644 --- a/agent/consul/state/usage_test.go +++ b/agent/consul/state/usage_test.go @@ -38,7 +38,7 @@ func TestStateStore_Usage_NodeUsage_Delete(t *testing.T) { require.Equal(t, idx, uint64(1)) require.Equal(t, usage.Nodes, 2) - require.NoError(t, s.DeleteNode(2, "node2", nil)) + require.NoError(t, s.DeleteNode(2, "node2", nil, "")) idx, usage, err = s.NodeUsage() require.NoError(t, err) require.Equal(t, idx, uint64(2)) @@ -152,7 +152,7 @@ func TestStateStore_Usage_ServiceUsage_DeleteNode(t *testing.T) { require.Equal(t, 1, usage.ConnectServiceInstances[string(structs.ServiceKindConnectProxy)]) require.Equal(t, 1, usage.ConnectServiceInstances[connectNativeInstancesTable]) - require.NoError(t, s.DeleteNode(4, "node1", nil)) + require.NoError(t, s.DeleteNode(4, "node1", nil, "")) idx, usage, err = s.ServiceUsage() require.NoError(t, err) diff --git a/agent/consul/stream/event.go b/agent/consul/stream/event.go index c2223d8e2..708420de2 100644 --- a/agent/consul/stream/event.go +++ b/agent/consul/stream/event.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto/pbsubscribe" ) // Topic is an identifier that partitions events. A subscription will only receive @@ -46,6 +47,10 @@ type Payload interface { // it is usually the normalized resource name (including the partition and // namespace if applicable). Subject() Subject + + // ToSubscriptionEvent is used to convert streaming events to their + // serializable equivalent. 
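// A representative implementation for an externally visible payload might
// look like this sketch (myPayload and its pbsubscribe payload wrapper are
// illustrative, not part of this patch):
//
//	func (p myPayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event {
//		return &pbsubscribe.Event{
//			Index:   idx,
//			Payload: ..., // topic-specific pbsubscribe payload for p
//		}
//	}
//
// Internal-only payloads (the framing events and closeSubscriptionPayload
// below) panic instead, since they are never serialized for subscribers.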
+ ToSubscriptionEvent(idx uint64) *pbsubscribe.Event } // PayloadEvents is a Payload that may be returned by Subscription.Next when @@ -109,6 +114,26 @@ func (PayloadEvents) Subject() Subject { panic("PayloadEvents does not implement Subject") } +func (p PayloadEvents) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_EventBatch{ + EventBatch: &pbsubscribe.EventBatch{ + Events: batchEventsFromEventSlice(p.Items), + }, + }, + } +} + +func batchEventsFromEventSlice(events []Event) []*pbsubscribe.Event { + result := make([]*pbsubscribe.Event, len(events)) + for i := range events { + event := events[i] + result[i] = event.Payload.ToSubscriptionEvent(event.Index) + } + return result +} + // IsEndOfSnapshot returns true if this is a framing event that indicates the // snapshot has completed. Subsequent events from Subscription.Next will be // streamed as they occur. @@ -142,18 +167,42 @@ func (framingEvent) Subject() Subject { panic("framing events do not implement Subject") } +func (framingEvent) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("framingEvent does not implement ToSubscriptionEvent") +} + type endOfSnapshot struct { framingEvent } +func (s endOfSnapshot) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true}, + } +} + type newSnapshotToFollow struct { framingEvent } +func (s newSnapshotToFollow) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true}, + } +} + type closeSubscriptionPayload struct { tokensSecretIDs []string } +// closeSubscriptionPayload is only used internally and does not correspond to +// a subscription event that would be sent to clients. 
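Every other concrete payload now converts itself to the wire format, which is what lets the subscribe service further down drop its setPayload type switch. The one implementation not visible in this hunk is the catalog's EventPayloadCheckServiceNode; it presumably mirrors the case being removed from subscribe.go below. A sketch under that assumption:

func (e EventPayloadCheckServiceNode) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event {
	return &pbsubscribe.Event{
		Index: idx,
		Payload: &pbsubscribe.Event_ServiceHealth{
			ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
				Op:               e.Op,
				CheckServiceNode: pbservice.NewCheckServiceNodeFromStructs(e.Value),
			},
		},
	}
}

Internal-only payloads never reach a subscriber, so they panic instead; closeSubscriptionPayload, whose implementation follows, is one of them.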
+func (s closeSubscriptionPayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("closeSubscriptionPayload does not implement ToSubscriptionEvent") +} + func (closeSubscriptionPayload) HasReadPermission(acl.Authorizer) bool { return false } diff --git a/agent/consul/stream/event_publisher_test.go b/agent/consul/stream/event_publisher_test.go index d9f7097a6..6d930691d 100644 --- a/agent/consul/stream/event_publisher_test.go +++ b/agent/consul/stream/event_publisher_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto/pbsubscribe" ) type intTopic int @@ -84,6 +85,10 @@ func (p simplePayload) HasReadPermission(acl.Authorizer) bool { func (p simplePayload) Subject() Subject { return StringSubject(p.key) } +func (p simplePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("simplePayload does not implement ToSubscriptionEvent") +} + func registerTestSnapshotHandlers(t *testing.T, publisher *EventPublisher) { t.Helper() diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index 868ea3b81..7f8d09f32 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -234,7 +234,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", d) } - _, n, err := state.GetNode("foo", nil) + _, n, err := state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -242,7 +242,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - _, s, err := state.NodeService("foo", "svc-foo", nil) + _, s, err := state.NodeService("foo", "svc-foo", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -250,7 +250,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - _, c, err := state.NodeCheck("foo", types.CheckID("check-foo"), nil) + _, c, err := state.NodeCheck("foo", types.CheckID("check-foo"), nil, "") if err != nil { t.Fatalf("err: %v", err) } diff --git a/agent/grpc/private/services/subscribe/subscribe.go b/agent/grpc/private/services/subscribe/subscribe.go index c1b2f7e2d..3abaa4b55 100644 --- a/agent/grpc/private/services/subscribe/subscribe.go +++ b/agent/grpc/private/services/subscribe/subscribe.go @@ -2,7 +2,6 @@ package subscribe import ( "errors" - "fmt" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" @@ -13,7 +12,6 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" ) @@ -61,7 +59,7 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub return status.Error(codes.InvalidArgument, "Key is required") } - sub, err := h.Backend.Subscribe(toStreamSubscribeRequest(req, entMeta)) + sub, err := h.Backend.Subscribe(state.PBToStreamSubscribeRequest(req, entMeta)) if err != nil { return err } @@ -84,25 +82,15 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub } elog.Trace(event) - e := newEventFromStreamEvent(event) + + // TODO: This conversion could be cached if needed + e := event.Payload.ToSubscriptionEvent(event.Index) if err := serverStream.Send(e); err != nil { return err } } } -func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.EnterpriseMeta) *stream.SubscribeRequest { - return &stream.SubscribeRequest{ - Topic: req.Topic, - Subject: state.EventSubjectService{ - Key: req.Key, - EnterpriseMeta: entMeta, - 
}, - Token: req.Token, - Index: req.Index, - } -} - func forwardToDC( req *pbsubscribe.SubscribeRequest, serverStream pbsubscribe.StateChangeSubscription_SubscribeServer, @@ -129,48 +117,3 @@ func forwardToDC( } } } - -func newEventFromStreamEvent(event stream.Event) *pbsubscribe.Event { - e := &pbsubscribe.Event{Index: event.Index} - switch { - case event.IsEndOfSnapshot(): - e.Payload = &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true} - return e - case event.IsNewSnapshotToFollow(): - e.Payload = &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true} - return e - } - setPayload(e, event.Payload) - return e -} - -func setPayload(e *pbsubscribe.Event, payload stream.Payload) { - switch p := payload.(type) { - case *stream.PayloadEvents: - e.Payload = &pbsubscribe.Event_EventBatch{ - EventBatch: &pbsubscribe.EventBatch{ - Events: batchEventsFromEventSlice(p.Items), - }, - } - case state.EventPayloadCheckServiceNode: - e.Payload = &pbsubscribe.Event_ServiceHealth{ - ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ - Op: p.Op, - // TODO: this could be cached - CheckServiceNode: pbservice.NewCheckServiceNodeFromStructs(p.Value), - }, - } - default: - panic(fmt.Sprintf("unexpected payload: %T: %#v", p, p)) - } -} - -func batchEventsFromEventSlice(events []stream.Event) []*pbsubscribe.Event { - result := make([]*pbsubscribe.Event, len(events)) - for i := range events { - event := events[i] - result[i] = &pbsubscribe.Event{Index: event.Index} - setPayload(result[i], event.Payload) - } - return result -} diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index c31959057..c9afbe495 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -956,7 +956,7 @@ func TestNewEventFromSteamEvent(t *testing.T) { fn := func(t *testing.T, tc testCase) { expected := tc.expected - actual := newEventFromStreamEvent(tc.event) + actual := tc.event.Payload.ToSubscriptionEvent(tc.event.Index) prototest.AssertDeepEqual(t, expected, actual, cmpopts.EquateEmpty()) } diff --git a/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go index 872e2e74b..c34289ff0 100644 --- a/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go @@ -5,14 +5,15 @@ import ( "errors" "strings" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" + acl "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/grpc/public" structs "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbdataplane" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/structpb" ) func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.GetEnvoyBootstrapParamsRequest) (*pbdataplane.GetEnvoyBootstrapParamsResponse, error) { @@ -31,7 +32,7 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G store := s.GetStore() - _, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), req.GetServiceId(), &entMeta) + _, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), req.GetServiceId(), &entMeta, structs.DefaultPeerKeyword) if err != nil { logger.Error("Error 
looking up service", "error", err) if errors.Is(err, state.ErrNodeNotFound) { diff --git a/agent/grpc/public/services/dataplane/server.go b/agent/grpc/public/services/dataplane/server.go index 80b0ccfaf..b45f6f38a 100644 --- a/agent/grpc/public/services/dataplane/server.go +++ b/agent/grpc/public/services/dataplane/server.go @@ -23,7 +23,7 @@ type Config struct { } type StateStore interface { - ServiceNode(string, string, string, *acl.EnterpriseMeta) (uint64, *structs.ServiceNode, error) + ServiceNode(string, string, string, *acl.EnterpriseMeta, string) (uint64, *structs.ServiceNode, error) } //go:generate mockery --name ACLResolver --inpackage diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index 69e7777f6..7f904089d 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -194,6 +194,8 @@ func (s *HTTPHandlers) healthServiceNodes(resp http.ResponseWriter, req *http.Re return nil, nil } + s.parsePeerName(req, &args) + // Check for tags params := req.URL.Query() if _, ok := params["tag"]; ok { diff --git a/agent/health_endpoint_test.go b/agent/health_endpoint_test.go index baa4c4342..8bf37835c 100644 --- a/agent/health_endpoint_test.go +++ b/agent/health_endpoint_test.go @@ -607,129 +607,163 @@ func TestHealthServiceNodes(t *testing.T) { t.Parallel() a := NewTestAgent(t, "") - defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") - req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1", nil) - resp := httptest.NewRecorder() - obj, err := a.srv.HealthServiceNodes(resp, req) - if err != nil { - t.Fatalf("err: %v", err) + testingPeerNames := []string{"", "my-peer"} + + suffix := func(peerName string) string { + if peerName == "" { + return "" + } + // TODO(peering): after streaming works, remove the "&near=_agent" part + return "&peer=" + peerName + "&near=_agent" } - assertIndex(t, resp) - - // Should be 1 health check for consul - nodes := obj.(structs.CheckServiceNodes) - if len(nodes) != 1 { - t.Fatalf("bad: %v", obj) - } - - req, _ = http.NewRequest("GET", "/v1/health/service/nope?dc=dc1", nil) - resp = httptest.NewRecorder() - obj, err = a.srv.HealthServiceNodes(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - - assertIndex(t, resp) - - // Should be a non-nil empty list - nodes = obj.(structs.CheckServiceNodes) - if nodes == nil || len(nodes) != 0 { - t.Fatalf("bad: %v", obj) - } - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "test", - Service: "test", - }, - } - - var out struct{} - if err := a.RPC("Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - req, _ = http.NewRequest("GET", "/v1/health/service/test?dc=dc1", nil) - resp = httptest.NewRecorder() - obj, err = a.srv.HealthServiceNodes(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - - assertIndex(t, resp) - - // Should be a non-nil empty list for checks - nodes = obj.(structs.CheckServiceNodes) - if len(nodes) != 1 || nodes[0].Checks == nil || len(nodes[0].Checks) != 0 { - t.Fatalf("bad: %v", obj) - } - - // Test caching - { - // List instances with cache enabled - req, _ := http.NewRequest("GET", "/v1/health/service/test?cached", nil) + for _, peerName := range testingPeerNames { + req, err := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1"+suffix(peerName), nil) + require.NoError(t, err) resp := httptest.NewRecorder() obj, err := a.srv.HealthServiceNodes(resp, req) require.NoError(t, err) - nodes := 
obj.(structs.CheckServiceNodes) - assert.Len(t, nodes, 1) - // Should be a cache miss - assert.Equal(t, "MISS", resp.Header().Get("X-Cache")) + assertIndex(t, resp) + + nodes := obj.(structs.CheckServiceNodes) + if peerName == "" { + // Should be 1 health check for consul + require.Len(t, nodes, 1) + } else { + require.NotNil(t, nodes) + require.Len(t, nodes, 0) + } + + req, err = http.NewRequest("GET", "/v1/health/service/nope?dc=dc1"+suffix(peerName), nil) + require.NoError(t, err) + resp = httptest.NewRecorder() + obj, err = a.srv.HealthServiceNodes(resp, req) + require.NoError(t, err) + + assertIndex(t, resp) + + // Should be a non-nil empty list + nodes = obj.(structs.CheckServiceNodes) + require.NotNil(t, nodes) + require.Len(t, nodes, 0) } - { - // List instances with cache enabled - req, _ := http.NewRequest("GET", "/v1/health/service/test?cached", nil) + // TODO(peering): will have to seed this data differently in the future + originalRegister := make(map[string]*structs.RegisterRequest) + for _, peerName := range testingPeerNames { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + PeerName: peerName, + Service: &structs.NodeService{ + ID: "test", + Service: "test", + PeerName: peerName, + }, + } + + var out struct{} + require.NoError(t, a.RPC("Catalog.Register", args, &out)) + originalRegister[peerName] = args + } + + verify := func(t *testing.T, peerName string, nodes structs.CheckServiceNodes) { + require.Len(t, nodes, 1) + require.Equal(t, peerName, nodes[0].Node.PeerName) + require.Equal(t, "bar", nodes[0].Node.Node) + require.Equal(t, peerName, nodes[0].Service.PeerName) + require.Equal(t, "test", nodes[0].Service.Service) + require.NotNil(t, nodes[0].Checks) + require.Len(t, nodes[0].Checks, 0) + } + + for _, peerName := range testingPeerNames { + req, err := http.NewRequest("GET", "/v1/health/service/test?dc=dc1"+suffix(peerName), nil) + require.NoError(t, err) resp := httptest.NewRecorder() obj, err := a.srv.HealthServiceNodes(resp, req) require.NoError(t, err) - nodes := obj.(structs.CheckServiceNodes) - assert.Len(t, nodes, 1) - // Should be a cache HIT now! - assert.Equal(t, "HIT", resp.Header().Get("X-Cache")) + assertIndex(t, resp) + + // Should be a non-nil empty list for checks + nodes := obj.(structs.CheckServiceNodes) + verify(t, peerName, nodes) + + // Test caching + { + // List instances with cache enabled + req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthServiceNodes(resp, req) + require.NoError(t, err) + nodes := obj.(structs.CheckServiceNodes) + verify(t, peerName, nodes) + + // Should be a cache miss + require.Equal(t, "MISS", resp.Header().Get("X-Cache")) + } + + { + // List instances with cache enabled + req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthServiceNodes(resp, req) + require.NoError(t, err) + nodes := obj.(structs.CheckServiceNodes) + verify(t, peerName, nodes) + + // Should be a cache HIT now! 
+ require.Equal(t, "HIT", resp.Header().Get("X-Cache")) + } } // Ensure background refresh works { - // Register a new instance of the service - args2 := args - args2.Node = "baz" - args2.Address = "127.0.0.2" - require.NoError(t, a.RPC("Catalog.Register", args, &out)) + // TODO(peering): will have to seed this data differently in the future + for _, peerName := range testingPeerNames { + args := originalRegister[peerName] + // Register a new instance of the service + args2 := *args + args2.Node = "baz" + args2.Address = "127.0.0.2" + var out struct{} + require.NoError(t, a.RPC("Catalog.Register", &args2, &out)) + } - retry.Run(t, func(r *retry.R) { - // List it again - req, _ := http.NewRequest("GET", "/v1/health/service/test?cached", nil) - resp := httptest.NewRecorder() - obj, err := a.srv.HealthServiceNodes(resp, req) - r.Check(err) + for _, peerName := range testingPeerNames { + retry.Run(t, func(r *retry.R) { + // List it again + req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil) + require.NoError(r, err) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthServiceNodes(resp, req) + require.NoError(r, err) - nodes := obj.(structs.CheckServiceNodes) - if len(nodes) != 2 { - r.Fatalf("Want 2 nodes") - } - header := resp.Header().Get("X-Consul-Index") - if header == "" || header == "0" { - r.Fatalf("Want non-zero header: %q", header) - } - _, err = strconv.ParseUint(header, 10, 64) - r.Check(err) + nodes := obj.(structs.CheckServiceNodes) + require.Len(r, nodes, 2) - // Should be a cache hit! The data should've updated in the cache - // in the background so this should've been fetched directly from - // the cache. - if resp.Header().Get("X-Cache") != "HIT" { - r.Fatalf("should be a cache hit") - } - }) + header := resp.Header().Get("X-Consul-Index") + if header == "" || header == "0" { + r.Fatalf("Want non-zero header: %q", header) + } + _, err = strconv.ParseUint(header, 10, 64) + require.NoError(r, err) + + // Should be a cache hit! The data should've updated in the cache + // in the background so this should've been fetched directly from + // the cache. 
+ if resp.Header().Get("X-Cache") != "HIT" { + r.Fatalf("should be a cache hit") + } + }) + } } } diff --git a/agent/http.go b/agent/http.go index 16a3a2150..6be651c1a 100644 --- a/agent/http.go +++ b/agent/http.go @@ -1105,6 +1105,12 @@ func (s *HTTPHandlers) parseSource(req *http.Request, source *structs.QuerySourc } } +func (s *HTTPHandlers) parsePeerName(req *http.Request, args *structs.ServiceSpecificRequest) { + if peer := req.URL.Query().Get("peer"); peer != "" { + args.PeerName = peer + } +} + // parseMetaFilter is used to parse the ?node-meta=key:value query parameter, used for // filtering results to nodes with the given metadata key/value func (s *HTTPHandlers) parseMetaFilter(req *http.Request) map[string]string { diff --git a/agent/http_register.go b/agent/http_register.go index 47cdfcf1f..cbef7fa6c 100644 --- a/agent/http_register.go +++ b/agent/http_register.go @@ -103,6 +103,10 @@ func init() { registerEndpoint("/v1/operator/autopilot/configuration", []string{"GET", "PUT"}, (*HTTPHandlers).OperatorAutopilotConfiguration) registerEndpoint("/v1/operator/autopilot/health", []string{"GET"}, (*HTTPHandlers).OperatorServerHealth) registerEndpoint("/v1/operator/autopilot/state", []string{"GET"}, (*HTTPHandlers).OperatorAutopilotState) + registerEndpoint("/v1/peering/token", []string{"POST"}, (*HTTPHandlers).PeeringGenerateToken) + registerEndpoint("/v1/peering/initiate", []string{"POST"}, (*HTTPHandlers).PeeringInitiate) + registerEndpoint("/v1/peering/", []string{"GET"}, (*HTTPHandlers).PeeringRead) + registerEndpoint("/v1/peerings", []string{"GET"}, (*HTTPHandlers).PeeringList) registerEndpoint("/v1/query", []string{"GET", "POST"}, (*HTTPHandlers).PreparedQueryGeneral) // specific prepared query endpoints have more complex rules for allowed methods, so // the prefix is registered with no methods. diff --git a/agent/peering_endpoint.go b/agent/peering_endpoint.go new file mode 100644 index 000000000..6138a910c --- /dev/null +++ b/agent/peering_endpoint.go @@ -0,0 +1,118 @@ +package agent + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/proto/pbpeering" +) + +// PeeringRead fetches a peering that matches the request parameters. +func (s *HTTPHandlers) PeeringRead(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + name, err := getPathSuffixUnescaped(req.URL.Path, "/v1/peering/") + if err != nil { + return nil, err + } + if name == "" { + return nil, BadRequestError{Reason: "Must specify a name to fetch."} + } + + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + + args := pbpeering.PeeringReadRequest{ + Name: name, + Datacenter: s.agent.config.Datacenter, + Partition: entMeta.PartitionOrEmpty(), // should be "" in OSS + } + + result, err := s.agent.rpcClientPeering.PeeringRead(req.Context(), &args) + if err != nil { + return nil, err + } + if result.Peering == nil { + return nil, NotFoundError{} + } + + // TODO(peering): replace with API types + return result.Peering, nil +} + +// PeeringList fetches all peerings in the datacenter in OSS or in a given partition in Consul Enterprise. 
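For orientation, the routes registered above can be exercised with plain HTTP against a running agent. An illustrative client-side sketch; the agent address and peer name here are examples only:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Ask the local cluster to mint a token for a prospective peer.
	// PeerName is the only required field in the body.
	resp, err := http.Post(
		"http://127.0.0.1:8500/v1/peering/token",
		"application/json",
		strings.NewReader(`{"PeerName": "cluster-02"}`),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	token, _ := io.ReadAll(resp.Body)
	fmt.Println(string(token)) // JSON containing a base64-encoded PeeringToken

	// List every peering known to this datacenter.
	list, err := http.Get("http://127.0.0.1:8500/v1/peerings")
	if err != nil {
		panic(err)
	}
	defer list.Body.Close()
	peerings, _ := io.ReadAll(list.Body)
	fmt.Println(string(peerings))
}

The handler below serves the list side of that exchange.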
+func (s *HTTPHandlers) PeeringList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + + args := pbpeering.PeeringListRequest{ + Datacenter: s.agent.config.Datacenter, + Partition: entMeta.PartitionOrEmpty(), // should be "" in OSS + } + + pbresp, err := s.agent.rpcClientPeering.PeeringList(req.Context(), &args) + if err != nil { + return nil, err + } + return pbresp.Peerings, nil +} + +// PeeringGenerateToken handles POSTs to the /v1/peering/token endpoint. The request +// will always be forwarded via RPC to the local leader. +func (s *HTTPHandlers) PeeringGenerateToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + args := pbpeering.GenerateTokenRequest{ + Datacenter: s.agent.config.Datacenter, + } + + if req.Body == nil { + return nil, BadRequestError{Reason: "The peering arguments must be provided in the body"} + } + + if err := lib.DecodeJSON(req.Body, &args); err != nil { + return nil, BadRequestError{Reason: fmt.Sprintf("Body decoding failed: %v", err)} + } + + if args.PeerName == "" { + return nil, BadRequestError{Reason: "PeerName is required in the payload when generating a new peering token."} + } + + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + + if args.Partition == "" { + args.Partition = entMeta.PartitionOrEmpty() + } + + return s.agent.rpcClientPeering.GenerateToken(req.Context(), &args) +} + +// PeeringInitiate handles POSTs to the /v1/peering/initiate endpoint. The request +// will always be forwarded via RPC to the local leader. +func (s *HTTPHandlers) PeeringInitiate(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + args := pbpeering.InitiateRequest{ + Datacenter: s.agent.config.Datacenter, + } + + if req.Body == nil { + return nil, BadRequestError{Reason: "The peering arguments must be provided in the body"} + } + + if err := lib.DecodeJSON(req.Body, &args); err != nil { + return nil, BadRequestError{Reason: fmt.Sprintf("Body decoding failed: %v", err)} + } + + if args.PeerName == "" { + return nil, BadRequestError{Reason: "PeerName is required in the payload when initiating a peering."} + } + + if args.PeeringToken == "" { + return nil, BadRequestError{Reason: "PeeringToken is required in the payload when initiating a peering."} + } + + return s.agent.rpcClientPeering.Initiate(req.Context(), &args) +} diff --git a/agent/peering_endpoint_oss_test.go b/agent/peering_endpoint_oss_test.go new file mode 100644 index 000000000..5a6fa1f28 --- /dev/null +++ b/agent/peering_endpoint_oss_test.go @@ -0,0 +1,45 @@ +//go:build !consulent +// +build !consulent + +package agent + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +func TestHTTP_Peering_GenerateToken_OSS_Failure(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := NewTestAgent(t, "") + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + t.Run("Doesn't allow partitions in OSS HTTP requests", func(t *testing.T) { + reqBody := &pbpeering.GenerateTokenRequest{ + PeerName: "peering-a", + } + reqBodyBytes, err := json.Marshal(reqBody) + require.NoError(t, err) + req, err := http.NewRequest("POST", 
"/v1/peering/token?partition=foo", + bytes.NewReader(reqBodyBytes)) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "Partitions are a Consul Enterprise feature") + }) +} diff --git a/agent/peering_endpoint_test.go b/agent/peering_endpoint_test.go new file mode 100644 index 000000000..0e1840cf0 --- /dev/null +++ b/agent/peering_endpoint_test.go @@ -0,0 +1,312 @@ +package agent + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +var validCA = ` +-----BEGIN CERTIFICATE----- +MIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg +Q0EgNzAeFw0xODA1MjExNjMzMjhaFw0yODA1MTgxNjMzMjhaMBYxFDASBgNVBAMT +C0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAER0qlxjnRcMEr +iSGlH7G7dYU7lzBEmLUSMZkyBbClmyV8+e8WANemjn+PLnCr40If9cmpr7RnC9Qk +GTaLnLiF16OCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/ +MGgGA1UdDgRhBF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1OTpjMjpmYTo0ZTo3 +NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToyNDpiMDowNDpiMzpl +ODo5Nzo1Yjo3ZTBqBgNVHSMEYzBhgF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1 +OTpjMjpmYTo0ZTo3NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToy +NDpiMDowNDpiMzplODo5Nzo1Yjo3ZTA/BgNVHREEODA2hjRzcGlmZmU6Ly8xMjRk +ZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIuY29uc3VsMD0GA1UdHgEB +/wQzMDGgLzAtgisxMjRkZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIu +Y29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIQDzkkI7R+0U12a+zq2EQhP/n2mHmta+ +fs2hBxWIELGwTAIgLdO7RRw+z9nnxCIA6kNl//mIQb+PGItespiHZKAz74Q= +-----END CERTIFICATE----- +` + +func TestHTTP_Peering_GenerateToken(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + t.Run("No Body", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/token", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "The peering arguments must be provided in the body") + }) + + t.Run("Body Invalid", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/token", bytes.NewReader([]byte("abc"))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "Body decoding failed:") + }) + + t.Run("No Name", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/token", + bytes.NewReader([]byte(`{}`))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "PeerName is required") + }) + + // TODO(peering): add more failure cases + + t.Run("Success", func(t *testing.T) { + body := &pbpeering.GenerateTokenRequest{ + PeerName: "peering-a", + } + + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + + req, err := http.NewRequest("POST", "/v1/peering/token", bytes.NewReader(bodyBytes)) + require.NoError(t, 
err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String()) + + var r pbpeering.GenerateTokenResponse + require.NoError(t, json.NewDecoder(resp.Body).Decode(&r)) + + tokenJSON, err := base64.StdEncoding.DecodeString(r.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + require.Nil(t, token.CA) + require.Equal(t, []string{fmt.Sprintf("127.0.0.1:%d", a.config.ServerPort)}, token.ServerAddresses) + require.Equal(t, "server.dc1.consul", token.ServerName) + + // The PeerID in the token is randomly generated so we don't assert on its value. + require.NotEmpty(t, token.PeerID) + }) +} + +func TestHTTP_Peering_Initiate(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + t.Run("No Body", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "The peering arguments must be provided in the body") + }) + + t.Run("Body Invalid", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", bytes.NewReader([]byte("abc"))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "Body decoding failed:") + }) + + t.Run("No Name", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", + bytes.NewReader([]byte(`{}`))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "PeerName is required") + }) + + t.Run("No Token", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", + bytes.NewReader([]byte(`{"PeerName": "peer1-usw1"}`))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "PeeringToken is required") + }) + + // TODO(peering): add more failure cases + + t.Run("Success", func(t *testing.T) { + token := structs.PeeringToken{ + CA: []string{validCA}, + ServerName: "server.dc1.consul", + ServerAddresses: []string{fmt.Sprintf("1.2.3.4:%d", 443)}, + PeerID: "a0affd3e-f1c8-4bb9-9168-90fd902c441d", + } + tokenJSON, _ := json.Marshal(&token) + tokenB64 := base64.StdEncoding.EncodeToString(tokenJSON) + body := &pbpeering.InitiateRequest{ + PeerName: "peering-a", + PeeringToken: tokenB64, + } + + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + + req, err := http.NewRequest("POST", "/v1/peering/initiate", bytes.NewReader(bodyBytes)) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String()) + + // success response does not currently return a value so {} is correct + require.Equal(t, "{}", resp.Body.String()) + }) +} + +func TestHTTP_Peering_Read(t *testing.T) 
{ + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Insert peerings directly to state store. + // Note that the state store holds reference to the underlying + // variables; do not modify them after writing. + foo := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err := a.rpcClientPeering.PeeringWrite(ctx, foo) + require.NoError(t, err) + bar := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err = a.rpcClientPeering.PeeringWrite(ctx, bar) + require.NoError(t, err) + + t.Run("return foo", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/peering/foo", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + // TODO(peering): replace with API types + var pbresp pbpeering.Peering + require.NoError(t, json.NewDecoder(resp.Body).Decode(&pbresp)) + + require.Equal(t, foo.Peering.Name, pbresp.Name) + }) + + t.Run("not found", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/peering/baz", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusNotFound, resp.Code) + }) +} + +func TestHTTP_Peering_List(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Insert peerings directly to state store. + // Note that the state store holds reference to the underlying + // variables; do not modify them after writing. 
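The caveat matters because go-memdb indexes the objects it is handed rather than copies of them, so a fixture mutated after the write could silently disagree with the store's indexes. Illustrative fragment only:

// Ownership of the Peering effectively transfers to the store on write.
foo := &pbpeering.PeeringWriteRequest{
	Peering: &pbpeering.Peering{Name: "foo"},
}
_, err := a.rpcClientPeering.PeeringWrite(ctx, foo)
// From here on, treat foo.Peering as read-only:
// foo.Peering.Name = "renamed" // would bypass the index on Name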
+ foo := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err := a.rpcClientPeering.PeeringWrite(ctx, foo) + require.NoError(t, err) + bar := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err = a.rpcClientPeering.PeeringWrite(ctx, bar) + require.NoError(t, err) + + t.Run("return all", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/peerings", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + // TODO(peering): replace with API types + var pbresp []*pbpeering.Peering + require.NoError(t, json.NewDecoder(resp.Body).Decode(&pbresp)) + + require.Len(t, pbresp, 2) + }) +} diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go new file mode 100644 index 000000000..288669a1f --- /dev/null +++ b/agent/rpc/peering/service.go @@ -0,0 +1,741 @@ +package peering + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/armon/go-metrics" + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/dns" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbstatus" +) + +var ( + errPeeringTokenEmptyCA = errors.New("peering token CA value is empty") + errPeeringTokenInvalidCA = errors.New("peering token CA value is invalid") + errPeeringTokenEmptyServerAddresses = errors.New("peering token server addresses value is empty") + errPeeringTokenEmptyServerName = errors.New("peering token server name value is empty") + errPeeringTokenEmptyPeerID = errors.New("peering token peer ID value is empty") +) + +// errPeeringInvalidServerAddress is returned when an initiate request contains +// an invalid server address. +type errPeeringInvalidServerAddress struct { + addr string +} + +// Error implements the error interface +func (e *errPeeringInvalidServerAddress) Error() string { + return fmt.Sprintf("%s is not a valid peering server address", e.addr) +} + +// Service implements pbpeering.PeeringService to provide RPC operations for +// managing peering relationships. +type Service struct { + Backend Backend + logger hclog.Logger + streams *streamTracker +} + +func NewService(logger hclog.Logger, backend Backend) *Service { + return &Service{ + Backend: backend, + logger: logger, + streams: newStreamTracker(), + } +} + +var _ pbpeering.PeeringServiceServer = (*Service)(nil) + +// Backend defines the core integrations the Peering endpoint depends on. 
A +// functional implementation will integrate with various subcomponents of Consul +// such as the State store for reading and writing data, the CA machinery for +// providing access to CA data and the RPC system for forwarding requests to +// other servers. +type Backend interface { + // Forward should forward the request to the leader when necessary. + Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) + + // GetAgentCACertificates returns the CA certificate to be returned in the peering token data + GetAgentCACertificates() ([]string, error) + + // GetServerAddresses returns the addresses used for establishing a peering connection + GetServerAddresses() ([]string, error) + + // GetServerName returns the SNI to be returned in the peering token data which + // will be used by peers when establishing peering connections over TLS. + GetServerName() string + + // EncodeToken packages a peering token into a slice of bytes. + EncodeToken(tok *structs.PeeringToken) ([]byte, error) + + // DecodeToken unpackages a peering token from a slice of bytes. + DecodeToken([]byte) (*structs.PeeringToken, error) + + EnterpriseCheckPartitions(partition string) error + + Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) + + Store() Store + Apply() Apply +} + +// Store provides a read-only interface for querying Peering data. +type Store interface { + PeeringRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.Peering, error) + PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) + ExportedServicesForPeer(ws memdb.WatchSet, peerID string) (uint64, []structs.ServiceName, error) + AbandonCh() <-chan struct{} +} + +// Apply provides a write-only interface for persisting Peering data. +type Apply interface { + PeeringWrite(req *pbpeering.PeeringWriteRequest) error + PeeringDelete(req *pbpeering.PeeringDeleteRequest) error + PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error +} + +// GenerateToken implements the PeeringService RPC method to generate a +// peering token which is the initial step in establishing a peering relationship +// with other Consul clusters. +func (s *Service) GenerateToken( + ctx context.Context, + req *pbpeering.GenerateTokenRequest, +) (*pbpeering.GenerateTokenResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + // validate prior to forwarding to the leader, this saves a network hop + if err := dns.ValidateLabel(req.PeerName); err != nil { + return nil, fmt.Errorf("%s is not a valid peer name: %w", req.PeerName, err) + } + + // TODO(peering): add metrics + // TODO(peering): add tracing + + resp := &pbpeering.GenerateTokenResponse{} + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).GenerateToken(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + ca, err := s.Backend.GetAgentCACertificates() + if err != nil { + return nil, err + } + + serverAddrs, err := s.Backend.GetServerAddresses() + if err != nil { + return nil, err + } + + writeReq := pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: req.PeerName, + + // TODO(peering): Normalize from ACL token once this endpoint is guarded by ACLs. 
+ Partition: req.PartitionOrDefault(), + }, + } + if err := s.Backend.Apply().PeeringWrite(&writeReq); err != nil { + return nil, fmt.Errorf("failed to write peering: %w", err) + } + + q := state.Query{ + Value: strings.ToLower(req.PeerName), + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + } + _, peering, err := s.Backend.Store().PeeringRead(nil, q) + if err != nil { + return nil, err + } + if peering == nil { + return nil, fmt.Errorf("peering was deleted while token generation request was in flight") + } + + tok := structs.PeeringToken{ + // Store the UUID so that we can do a global search when handling inbound streams. + PeerID: peering.ID, + CA: ca, + ServerAddresses: serverAddrs, + ServerName: s.Backend.GetServerName(), + } + + encoded, err := s.Backend.EncodeToken(&tok) + if err != nil { + return nil, err + } + resp.PeeringToken = string(encoded) + return resp, err +} + +// Initiate implements the PeeringService RPC method to finalize peering +// registration. Given a valid token output from a peer's GenerateToken endpoint, +// a peering is registered. +func (s *Service) Initiate( + ctx context.Context, + req *pbpeering.InitiateRequest, +) (*pbpeering.InitiateResponse, error) { + // validate prior to forwarding to the leader, this saves a network hop + if err := dns.ValidateLabel(req.PeerName); err != nil { + return nil, fmt.Errorf("%s is not a valid peer name: %w", req.PeerName, err) + } + tok, err := s.Backend.DecodeToken([]byte(req.PeeringToken)) + if err != nil { + return nil, err + } + if err := validatePeeringToken(tok); err != nil { + return nil, err + } + + resp := &pbpeering.InitiateResponse{} + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).Initiate(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "initiate"}, time.Now()) + + // convert ServiceAddress values to strings + serverAddrs := make([]string, len(tok.ServerAddresses)) + for i, addr := range tok.ServerAddresses { + serverAddrs[i] = addr + } + + // as soon as a peering is written with a list of ServerAddresses that is + // non-empty, the leader routine will see the peering and attempt to establish + // a connection with the remote peer. 
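A hypothetical sketch of the leader-side reaction described above, written against the Store interface defined earlier in this file; the actual routine lives in leader_peering.go (not part of this hunk) and may differ:

func watchPeerings(ctx context.Context, store Store, dial func(*pbpeering.Peering)) error {
	for {
		ws := memdb.NewWatchSet()
		ws.Add(store.AbandonCh())
		_, peerings, err := store.PeeringList(ws, *structs.NodeEnterpriseMetaInPartition(""))
		if err != nil {
			return err
		}
		for _, p := range peerings {
			if len(p.PeerServerAddresses) > 0 {
				go dial(p) // hypothetical dialer that opens a StreamResources stream
			}
		}
		// Block until the peering table changes or the context ends.
		if err := ws.WatchCtx(ctx); err != nil {
			return err
		}
	}
}

The write below is what that loop would react to.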
+ writeReq := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: req.PeerName, + PeerCAPems: tok.CA, + PeerServerAddresses: serverAddrs, + PeerServerName: tok.ServerName, + // uncomment once #1613 lands + // PeerID: tok.PeerID, + }, + } + if err = s.Backend.Apply().PeeringWrite(writeReq); err != nil { + return nil, fmt.Errorf("failed to write peering: %w", err) + } + // resp.Status == 0 + return resp, nil +} + +func (s *Service) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequest) (*pbpeering.PeeringReadResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringReadResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringRead(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "read"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + q := state.Query{ + Value: strings.ToLower(req.Name), + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition)} + _, peering, err := s.Backend.Store().PeeringRead(nil, q) + if err != nil { + return nil, err + } + return &pbpeering.PeeringReadResponse{Peering: peering}, nil +} + +func (s *Service) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequest) (*pbpeering.PeeringListResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringListResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringList(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "list"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + _, peerings, err := s.Backend.Store().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(req.Partition)) + if err != nil { + return nil, err + } + return &pbpeering.PeeringListResponse{Peerings: peerings}, nil +} + +// TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. 
+// Consider removing if we can find another way to populate state store in peering_endpoint_test.go +func (s *Service) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRequest) (*pbpeering.PeeringWriteResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Peering.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringWriteResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringWrite(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "write"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + err = s.Backend.Apply().PeeringWrite(req) + if err != nil { + return nil, err + } + return &pbpeering.PeeringWriteResponse{}, nil +} + +func (s *Service) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDeleteRequest) (*pbpeering.PeeringDeleteResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringDeleteResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringDelete(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "delete"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + err = s.Backend.Apply().PeeringDelete(req) + if err != nil { + return nil, err + } + return &pbpeering.PeeringDeleteResponse{}, nil +} + +type BidirectionalStream interface { + Send(*pbpeering.ReplicationMessage) error + Recv() (*pbpeering.ReplicationMessage, error) + Context() context.Context +} + +// StreamResources handles incoming streaming connections. +func (s *Service) StreamResources(stream pbpeering.PeeringService_StreamResourcesServer) error { + // Initial message on a new stream must be a new subscription request. + first, err := stream.Recv() + if err != nil { + s.logger.Error("failed to establish stream", "error", err) + return err + } + + // TODO(peering) Make request contain a list of resources, so that roots and services can be + // subscribed to with a single request. 
See: + // https://github.com/envoyproxy/data-plane-api/blob/main/envoy/service/discovery/v3/discovery.proto#L46 + req := first.GetRequest() + if req == nil { + return grpcstatus.Error(codes.InvalidArgument, "first message when initiating a peering must be a subscription request") + } + s.logger.Trace("received initial replication request from peer") + logTraceRecv(s.logger, req) + + if req.PeerID == "" { + return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must specify a PeerID") + } + if req.Nonce != "" { + return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must not contain a nonce") + } + if req.ResourceURL != pbpeering.TypeURLService { + return grpcstatus.Error(codes.InvalidArgument, fmt.Sprintf("subscription request to unknown resource URL: %s", req.ResourceURL)) + } + + // TODO(peering): Validate that a peering exists for this peer + // TODO(peering): If the peering is marked as deleted, send a Terminated message and return + // TODO(peering): Store subscription request so that an event publisher can separately handle pushing messages for it + s.logger.Info("accepted initial replication request from peer", "peer_id", req.PeerID) + + // For server peers both of these ID values are the same, because we generated a token with a local ID, + // and the client peer dials using that same ID. + return s.HandleStream(req.PeerID, req.PeerID, stream) +} + +// The localID provided is the locally-generated identifier for the peering. +// The remoteID is an identifier that the remote peer recognizes for the peering. +func (s *Service) HandleStream(localID, remoteID string, stream BidirectionalStream) error { + logger := s.logger.Named("stream").With("peer_id", localID) + logger.Trace("handling stream for peer") + + status, err := s.streams.connected(localID) + if err != nil { + return fmt.Errorf("failed to register stream: %v", err) + } + + // TODO(peering) Also need to clear subscriptions associated with the peer + defer s.streams.disconnected(localID) + + mgr := newSubscriptionManager(stream.Context(), logger, s.Backend) + subCh := mgr.subscribe(stream.Context(), localID) + + sub := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: remoteID, + }, + }, + } + logTraceSend(logger, sub) + + if err := stream.Send(sub); err != nil { + if err == io.EOF { + logger.Info("stream ended by peer") + status.trackReceiveError(err.Error()) + return nil + } + // TODO(peering) Test error handling in calls to Send/Recv + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + + // TODO(peering): Should this be buffered? + recvChan := make(chan *pbpeering.ReplicationMessage) + go func() { + defer close(recvChan) + for { + msg, err := stream.Recv() + if err == io.EOF { + logger.Info("stream ended by peer") + status.trackReceiveError(err.Error()) + return + } + if e, ok := grpcstatus.FromError(err); ok { + // Cancelling the stream is not an error, that means we or our peer intended to terminate the peering. + if e.Code() == codes.Canceled { + return + } + } + if err != nil { + logger.Error("failed to receive from stream", "error", err) + status.trackReceiveError(err.Error()) + return + } + + logTraceRecv(logger, msg) + recvChan <- msg + } + }() + + for { + select { + // When the doneCh is closed that means that the peering was deleted locally. 
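The select below is the accepting side's half of the protocol; a dialing peer runs a mirror image of it. A hypothetical sketch of that dialer, using only the message types defined in this PR (the real client loop may differ):

func dialPeer(ctx context.Context, conn *grpc.ClientConn, peerID string) error {
	stream, err := pbpeering.NewPeeringServiceClient(conn).StreamResources(ctx)
	if err != nil {
		return err
	}
	// Per StreamResources above, the first message must be a subscription request.
	sub := &pbpeering.ReplicationMessage{
		Payload: &pbpeering.ReplicationMessage_Request_{
			Request: &pbpeering.ReplicationMessage_Request{
				PeerID:      peerID,
				ResourceURL: pbpeering.TypeURLService,
			},
		},
	}
	if err := stream.Send(sub); err != nil {
		return err
	}
	for {
		msg, err := stream.Recv()
		if err != nil {
			return err
		}
		if resp := msg.GetResponse(); resp != nil {
			// Apply the resource locally, then ACK or NACK with the nonce.
			reply, _ := processResponse(resp) // reuses the helper defined later in this file
			if err := stream.Send(reply); err != nil {
				return err
			}
		}
	}
}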
+ case <-status.doneCh: + logger.Info("ending stream") + + term := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Terminated_{ + Terminated: &pbpeering.ReplicationMessage_Terminated{}, + }, + } + logTraceSend(logger, term) + + if err := stream.Send(term); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + + logger.Trace("deleting stream status") + s.streams.deleteStatus(localID) + + return nil + + case msg, open := <-recvChan: + if !open { + // No longer receiving data on the stream. + return nil + } + + if req := msg.GetRequest(); req != nil { + switch { + case req.Nonce == "": + // TODO(peering): This can happen on a client peer since they don't try to receive subscriptions before entering HandleStream. + // Should change that behavior or only allow it that one time. + + case req.Error != nil && (req.Error.Code != int32(code.Code_OK) || req.Error.Message != ""): + logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message) + status.trackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message)) + + default: + status.trackAck() + } + + continue + } + + if resp := msg.GetResponse(); resp != nil { + req, err := processResponse(resp) + if err != nil { + logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID) + status.trackReceiveError(err.Error()) + } else { + status.trackReceiveSuccess() + } + + logTraceSend(logger, req) + if err := stream.Send(req); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + + continue + } + + if term := msg.GetTerminated(); term != nil { + logger.Info("received peering termination message, cleaning up imported resources") + + // Once marked as terminated, a separate deferred deletion routine will clean up imported resources. + if err := s.Backend.Apply().PeeringTerminateByID(&pbpeering.PeeringTerminateByIDRequest{ID: localID}); err != nil { + return err + } + return nil + } + + case update := <-subCh: + switch { + case strings.HasPrefix(update.CorrelationID, subExportedService): + if err := pushServiceResponse(logger, stream, status, update); err != nil { + return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err) + } + + default: + logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID) + continue + } + } + } +} + +// pushServiceResponse handles sending exported service instance updates to the peer cluster. +// Each cache.UpdateEvent will contain all instances for a service name. +// If there are no instances in the event, we consider that to be a de-registration. +func pushServiceResponse(logger hclog.Logger, stream BidirectionalStream, status *lockableStreamStatus, update cache.UpdateEvent) error { + csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + if !ok { + logger.Error(fmt.Sprintf("invalid type for response: %T, expected *pbservice.IndexedCheckServiceNodes", update.Result)) + + // Skip this update to avoid locking up peering due to a bad service update. + return nil + } + serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService) + + // If no nodes are present then it's due to one of: + // 1. The service is newly registered or exported and yielded a transient empty update. + // 2. All instances of the service were de-registered. + // 3. The service was un-exported.
+ // + // We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that. + // Case #1 is a no-op for the importing peer. + if len(csn.Nodes) == 0 { + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + // TODO(peering): Nonce management + Nonce: "", + ResourceID: serviceName, + Operation: pbpeering.ReplicationMessage_Response_DELETE, + }, + }, + } + logTraceSend(logger, resp) + if err := stream.Send(resp); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + return nil + } + + // If there are nodes in the response, we push them as an UPSERT operation. + any, err := ptypes.MarshalAny(csn) + if err != nil { + // Log the error and skip this response to avoid locking up peering due to a bad update event. + logger.Error("failed to marshal service endpoints", "error", err) + return nil + } + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + // TODO(peering): Nonce management + Nonce: "", + ResourceID: serviceName, + Operation: pbpeering.ReplicationMessage_Response_UPSERT, + Resource: any, + }, + }, + } + logTraceSend(logger, resp) + if err := stream.Send(resp); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + return nil +} + +func (s *Service) StreamStatus(peer string) (resp StreamStatus, found bool) { + return s.streams.streamStatus(peer) +} + +// ConnectedStreams returns a map of connected stream IDs to the corresponding channel for tearing them down. 
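StreamStatus above and ConnectedStreams below both delegate to the streamTracker, which lives in stream_tracker.go and is not part of this hunk. A condensed sketch of the bookkeeping its call sites imply (details may differ):

type streamTracker struct {
	mu      sync.RWMutex
	streams map[string]*lockableStreamStatus
}

func newStreamTracker() *streamTracker {
	return &streamTracker{streams: make(map[string]*lockableStreamStatus)}
}

// connected registers a status entry for the peering, refusing duplicates.
func (t *streamTracker) connected(id string) (*lockableStreamStatus, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, ok := t.streams[id]; ok {
		return nil, fmt.Errorf("there is already an active stream for peer %q", id)
	}
	s := &lockableStreamStatus{doneCh: make(chan struct{})}
	t.streams[id] = s
	return s, nil
}

// connectedStreams exposes each stream's done channel so callers can tear it down.
func (t *streamTracker) connectedStreams() map[string]chan struct{} {
	t.mu.RLock()
	defer t.mu.RUnlock()
	out := make(map[string]chan struct{}, len(t.streams))
	for id, s := range t.streams {
		out[id] = s.doneCh
	}
	return out
}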
+func (s *Service) ConnectedStreams() map[string]chan struct{} { + return s.streams.connectedStreams() +} + +func makeReply(resourceURL, nonce string, errCode code.Code, errMsg string) *pbpeering.ReplicationMessage { + var rpcErr *pbstatus.Status + if errCode != code.Code_OK || errMsg != "" { + rpcErr = &pbstatus.Status{ + Code: int32(errCode), + Message: errMsg, + } + } + + msg := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: resourceURL, + Nonce: nonce, + Error: rpcErr, + }, + }, + } + return msg +} + +func processResponse(resp *pbpeering.ReplicationMessage_Response) (*pbpeering.ReplicationMessage, error) { + var ( + err error + errCode code.Code + errMsg string + ) + + if resp.ResourceURL != pbpeering.TypeURLService { + errCode = code.Code_INVALID_ARGUMENT + err = fmt.Errorf("received response for unknown resource type %q", resp.ResourceURL) + return makeReply(resp.ResourceURL, resp.Nonce, errCode, err.Error()), err + } + + switch resp.Operation { + case pbpeering.ReplicationMessage_Response_UPSERT: + err = handleUpsert(resp.ResourceURL, resp.Resource) + if err != nil { + errCode = code.Code_INTERNAL + errMsg = err.Error() + } + + case pbpeering.ReplicationMessage_Response_DELETE: + err = handleDelete(resp.ResourceURL, resp.ResourceID) + if err != nil { + errCode = code.Code_INTERNAL + errMsg = err.Error() + } + + default: + errCode = code.Code_INVALID_ARGUMENT + + op := pbpeering.ReplicationMessage_Response_Operation_name[int32(resp.Operation)] + if op == "" { + op = strconv.FormatInt(int64(resp.Operation), 10) + } + errMsg = fmt.Sprintf("unsupported operation: %q", op) + + err = errors.New(errMsg) + } + + return makeReply(resp.ResourceURL, resp.Nonce, errCode, errMsg), err +} + +func handleUpsert(resourceURL string, resource *anypb.Any) error { + // TODO(peering): implement + return nil +} + +func handleDelete(resourceURL string, resourceID string) error { + // TODO(peering): implement + return nil +} + +func logTraceRecv(logger hclog.Logger, pb proto.Message) { + logTraceProto(logger, pb, true) +} + +func logTraceSend(logger hclog.Logger, pb proto.Message) { + logTraceProto(logger, pb, false) +} + +func logTraceProto(logger hclog.Logger, pb proto.Message, received bool) { + if !logger.IsTrace() { + return + } + + dir := "sent" + if received { + dir = "received" + } + + m := jsonpb.Marshaler{ + Indent: " ", + } + out, err := m.MarshalToString(pb) + if err != nil { + out = "" + } + + logger.Trace("replication message", "direction", dir, "protobuf", out) +} diff --git a/agent/rpc/peering/service_oss_test.go b/agent/rpc/peering/service_oss_test.go new file mode 100644 index 000000000..8c7633639 --- /dev/null +++ b/agent/rpc/peering/service_oss_test.go @@ -0,0 +1,39 @@ +//go:build !consulent +// +build !consulent + +package peering_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/proto/pbpeering" +) + +func TestPeeringService_RejectsPartition(t *testing.T) { + s := newTestServer(t, nil) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + t.Run("read", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := &pbpeering.PeeringReadRequest{Name: "foo", Partition: "default"} + resp, err := client.PeeringRead(ctx, req) + require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") + require.Nil(t, resp) + }) + + 
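+	// Listing peerings with an explicit partition must be rejected the same way.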
t.Run("list", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := &pbpeering.PeeringListRequest{Partition: "default"} + resp, err := client.PeeringList(ctx, req) + require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") + require.Nil(t, resp) + }) +} diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go new file mode 100644 index 000000000..a90df37f1 --- /dev/null +++ b/agent/rpc/peering/service_test.go @@ -0,0 +1,414 @@ +package peering_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "path" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + gogrpc "google.golang.org/grpc" + + grpc "github.com/hashicorp/consul/agent/grpc/private" + "github.com/hashicorp/consul/agent/grpc/private/resolver" + "github.com/hashicorp/consul/proto/prototest" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" + "github.com/hashicorp/consul/agent/rpc/peering" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/consul/types" +) + +func TestPeeringService_GenerateToken(t *testing.T) { + dir := testutil.TempDir(t, "consul") + signer, _, _ := tlsutil.GeneratePrivateKey() + ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + cafile := path.Join(dir, "cacert.pem") + require.NoError(t, ioutil.WriteFile(cafile, []byte(ca), 0600)) + + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(c *consul.Config) { + c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.1" + c.TLSConfig.InternalRPC.CAFile = cafile + c.DataDir = dir + }) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Datacenter: "dc1"} + resp, err := client.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + token := &structs.PeeringToken{} + require.NoError(t, json.Unmarshal(tokenJSON, token)) + require.Equal(t, "server.dc1.consul", token.ServerName) + require.Len(t, token.ServerAddresses, 1) + require.Equal(t, "127.0.0.1:2345", token.ServerAddresses[0]) + require.Equal(t, []string{ca}, token.CA) + + require.NotEmpty(t, token.PeerID) + _, err = uuid.ParseUUID(token.PeerID) + require.NoError(t, err) + + _, peers, err := s.Server.FSM().State().PeeringList(nil, *structs.DefaultEnterpriseMetaInDefaultPartition()) + require.NoError(t, err) + require.Len(t, peers, 1) + + peers[0].ModifyIndex = 0 + peers[0].CreateIndex = 0 + + expect := &pbpeering.Peering{ + Name: "peerB", + Partition: acl.DefaultPartitionName, + ID: token.PeerID, + State: pbpeering.PeeringState_INITIAL, + } + require.Equal(t, expect, peers[0]) +} + +func TestPeeringService_Initiate(t *testing.T) { + validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") + 
validTokenJSON, _ := json.Marshal(&validToken)
+	validTokenB64 := base64.StdEncoding.EncodeToString(validTokenJSON)
+
+	// TODO(peering): see note on newTestServer, refactor to not use this
+	s := newTestServer(t, nil)
+	client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
+
+	type testcase struct {
+		name          string
+		req           *pbpeering.InitiateRequest
+		expectResp    *pbpeering.InitiateResponse
+		expectPeering *pbpeering.Peering
+		expectErr     string
+	}
+	run := func(t *testing.T, tc testcase) {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		t.Cleanup(cancel)
+
+		resp, err := client.Initiate(ctx, tc.req)
+		if tc.expectErr != "" {
+			require.Contains(t, err.Error(), tc.expectErr)
+			return
+		}
+		require.NoError(t, err)
+		prototest.AssertDeepEqual(t, tc.expectResp, resp)
+
+		// if a peering was expected to be written, try to read it back
+		if tc.expectPeering != nil {
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			t.Cleanup(cancel)
+
+			resp, err := client.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: tc.expectPeering.Name})
+			require.NoError(t, err)
+			// check individual values we care about since we don't know exactly
+			// what the create/modify indexes will be
+			require.Equal(t, tc.expectPeering.Name, resp.Peering.Name)
+			require.Equal(t, tc.expectPeering.Partition, resp.Peering.Partition)
+			require.Equal(t, tc.expectPeering.State, resp.Peering.State)
+			require.Equal(t, tc.expectPeering.PeerCAPems, resp.Peering.PeerCAPems)
+			require.Equal(t, tc.expectPeering.PeerServerAddresses, resp.Peering.PeerServerAddresses)
+			require.Equal(t, tc.expectPeering.PeerServerName, resp.Peering.PeerServerName)
+		}
+	}
+	tcs := []testcase{
+		{
+			name:      "invalid peer name",
+			req:       &pbpeering.InitiateRequest{PeerName: "--AA--"},
+			expectErr: "--AA-- is not a valid peer name",
+		},
+		{
+			name: "invalid token (base64)",
+			req: &pbpeering.InitiateRequest{
+				PeerName:     "peer1-usw1",
+				PeeringToken: "+++/+++",
+			},
+			expectErr: "illegal base64 data",
+		},
+		{
+			name: "invalid token (JSON)",
+			req: &pbpeering.InitiateRequest{
+				PeerName:     "peer1-usw1",
+				PeeringToken: "Cg==", // base64 of "\n"
+			},
+			expectErr: "unexpected end of JSON input",
+		},
+		{
+			name: "invalid token (empty)",
+			req: &pbpeering.InitiateRequest{
+				PeerName:     "peer1-usw1",
+				PeeringToken: "e30K", // base64 of "{}\n"
+			},
+			expectErr: "peering token CA value is empty",
+		},
+		{
+			name: "success",
+			req: &pbpeering.InitiateRequest{
+				PeerName:     "peer1-usw1",
+				PeeringToken: validTokenB64,
+			},
+			expectResp: &pbpeering.InitiateResponse{},
+			expectPeering: peering.TestPeering(
+				"peer1-usw1",
+				pbpeering.PeeringState_INITIAL,
+			),
+		},
+	}
+	for _, tc := range tcs {
+		t.Run(tc.name, func(t *testing.T) {
+			run(t, tc)
+		})
+	}
+}
+
+func TestPeeringService_Read(t *testing.T) {
+	// TODO(peering): see note on newTestServer, refactor to not use this
+	s := newTestServer(t, nil)
+
+	// insert peering directly to state store
+	p := &pbpeering.Peering{
+		Name:                "foo",
+		State:               pbpeering.PeeringState_INITIAL,
+		PeerCAPems:          nil,
+		PeerServerName:      "test",
+		PeerServerAddresses: []string{"addr1"},
+	}
+	err := s.Server.FSM().State().PeeringWrite(10, p)
+	require.NoError(t, err)
+
+	client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
+
+	type testcase struct {
+		name      string
+		req       *pbpeering.PeeringReadRequest
+		expect    *pbpeering.PeeringReadResponse
+		expectErr string
+	}
+	run := func(t *testing.T, tc testcase) {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel) + + resp, err := client.PeeringRead(ctx, tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + prototest.AssertDeepEqual(t, tc.expect, resp) + } + tcs := []testcase{ + { + name: "returns foo", + req: &pbpeering.PeeringReadRequest{Name: "foo"}, + expect: &pbpeering.PeeringReadResponse{Peering: p}, + expectErr: "", + }, + { + name: "bar not found", + req: &pbpeering.PeeringReadRequest{Name: "bar"}, + expect: &pbpeering.PeeringReadResponse{}, + expectErr: "", + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestPeeringService_List(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, nil) + + // Insert peerings directly to state store. + // Note that the state store holds reference to the underlying + // variables; do not modify them after writing. + foo := &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + } + require.NoError(t, s.Server.FSM().State().PeeringWrite(10, foo)) + bar := &pbpeering.Peering{ + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + } + require.NoError(t, s.Server.FSM().State().PeeringWrite(15, bar)) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + require.NoError(t, err) + + expect := &pbpeering.PeeringListResponse{ + Peerings: []*pbpeering.Peering{bar, foo}, + } + prototest.AssertDeepEqual(t, expect, resp) +} + +// newTestServer is copied from partition/service_test.go, with the addition of certs/cas. +// TODO(peering): these are endpoint tests and should live in the agent/consul +// package. Instead, these can be written around a mock client (see testing.go) +// and a mock backend (future) +func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer { + t.Helper() + conf := consul.DefaultConfig() + dir := testutil.TempDir(t, "consul") + + conf.Bootstrap = true + conf.Datacenter = "dc1" + conf.DataDir = dir + conf.RPCAddr = &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 2345} + conf.RaftConfig.ElectionTimeout = 200 * time.Millisecond + conf.RaftConfig.LeaderLeaseTimeout = 100 * time.Millisecond + conf.RaftConfig.HeartbeatTimeout = 200 * time.Millisecond + conf.TLSConfig.Domain = "consul" + + nodeID, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + conf.NodeID = types.NodeID(nodeID) + + if cb != nil { + cb(conf) + } + + // Apply config to copied fields because many tests only set the old + // values. 
+	conf.ACLResolverSettings.ACLsEnabled = conf.ACLsEnabled
+	conf.ACLResolverSettings.NodeName = conf.NodeName
+	conf.ACLResolverSettings.Datacenter = conf.Datacenter
+	conf.ACLResolverSettings.EnterpriseMeta = *conf.AgentEnterpriseMeta()
+
+	deps := newDefaultDeps(t, conf)
+	server, err := consul.NewServer(conf, deps, gogrpc.NewServer())
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, server.Shutdown())
+	})
+
+	testrpc.WaitForLeader(t, server.RPC, conf.Datacenter)
+
+	backend := consul.NewPeeringBackend(server, deps.GRPCConnPool)
+	handler := &peering.Service{Backend: backend}
+
+	grpcServer := gogrpc.NewServer()
+	pbpeering.RegisterPeeringServiceServer(grpcServer, handler)
+
+	lis, err := net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(t, err)
+	t.Cleanup(func() { lis.Close() })
+
+	g := new(errgroup.Group)
+	g.Go(func() error {
+		return grpcServer.Serve(lis)
+	})
+	t.Cleanup(func() {
+		grpcServer.Stop()
+		if err := g.Wait(); err != nil {
+			t.Logf("grpc server error: %v", err)
+		}
+	})
+
+	return testingServer{
+		Server:  server,
+		Backend: backend,
+		Addr:    lis.Addr(),
+	}
+}
+
+func (s testingServer) ClientConn(t *testing.T) *gogrpc.ClientConn {
+	t.Helper()
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	t.Cleanup(cancel)
+
+	conn, err := gogrpc.DialContext(ctx, s.Addr.String(), gogrpc.WithInsecure())
+	require.NoError(t, err)
+	t.Cleanup(func() { conn.Close() })
+	return conn
+}
+
+type testingServer struct {
+	Server  *consul.Server
+	Addr    net.Addr
+	Backend peering.Backend
+}
+
+// TODO(peering): remove duplication between this and agent/consul tests
+func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps {
+	t.Helper()
+
+	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
+		Name:   c.NodeName,
+		Level:  hclog.Debug,
+		Output: testutil.NewLogBuffer(t),
+	})
+
+	tls, err := tlsutil.NewConfigurator(c.TLSConfig, logger)
+	require.NoError(t, err, "failed to create tls configuration")
+
+	r := router.NewRouter(logger, c.Datacenter, fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter), nil)
+	builder := resolver.NewServerResolverBuilder(resolver.Config{})
+	resolver.Register(builder)
+
+	connPool := &pool.ConnPool{
+		Server:          false,
+		SrcAddr:         c.RPCSrcAddr,
+		Logger:          logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
+		MaxTime:         2 * time.Minute,
+		MaxStreams:      4,
+		TLSConfigurator: tls,
+		Datacenter:      c.Datacenter,
+	}
+
+	return consul.Deps{
+		Logger:          logger,
+		TLSConfigurator: tls,
+		Tokens:          new(token.Store),
+		Router:          r,
+		ConnPool:        connPool,
+		GRPCConnPool: grpc.NewClientConnPool(grpc.ClientConnPoolConfig{
+			Servers:               builder,
+			TLSWrapper:            grpc.TLSWrapper(tls.OutgoingRPCWrapper()),
+			UseTLSForDC:           tls.UseTLS,
+			DialingFromServer:     true,
+			DialingFromDatacenter: c.Datacenter,
+		}),
+		LeaderForwarder:          builder,
+		EnterpriseDeps:           newDefaultDepsEnterprise(t, logger, c),
+		NewRequestRecorderFunc:   middleware.NewRequestRecorder,
+		GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor,
+	}
+}
diff --git a/agent/rpc/peering/stream_test.go b/agent/rpc/peering/stream_test.go
new file mode 100644
index 000000000..65aa4c0f8
--- /dev/null
+++ b/agent/rpc/peering/stream_test.go
@@ -0,0 +1,810 @@
+package peering
+
+import (
+	"context"
+	"io"
+	"testing"
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/genproto/googleapis/rpc/code"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
"google.golang.org/grpc/status" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbstatus" + "github.com/hashicorp/consul/proto/prototest" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" +) + +func TestStreamResources_Server_FirstRequest(t *testing.T) { + type testCase struct { + name string + input *pbpeering.ReplicationMessage + wantErr error + } + + run := func(t *testing.T, tc testCase) { + srv := NewService(testutil.Logger(t), nil) + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + client.errCh = errCh + + go func() { + // Pass errors from server handler into errCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. + err := srv.StreamResources(client.replicationStream) + if err != nil { + errCh <- err + } + }() + + err := client.Send(tc.input) + require.NoError(t, err) + + msg, err := client.Recv() + require.Nil(t, msg) + require.Error(t, err) + require.EqualError(t, err, tc.wantErr.Error()) + } + + tt := []testCase{ + { + name: "unexpected response", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api-service", + Nonce: "2", + }, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "first message when initiating a peering must be a subscription request"), + }, + { + name: "missing peer id", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{}, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "initial subscription request must specify a PeerID"), + }, + { + name: "unexpected nonce", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: "63b60245-c475-426b-b314-4588d210859d", + Nonce: "1", + }, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "initial subscription request must not contain a nonce"), + }, + { + name: "unknown resource", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: "63b60245-c475-426b-b314-4588d210859d", + ResourceURL: "nomad.Job", + }, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "subscription request to unknown resource URL: nomad.Job"), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } + +} + +func TestStreamResources_Server_Terminate(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + srv := NewService(testutil.Logger(t), &testStreamBackend{ + store: store, + pub: publisher, + }) + + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + srv.streams.timeNow = it.Now + + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + client.errCh = errCh + + go func() { + // Pass errors from server handler into errCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. 
+ if err := srv.StreamResources(client.replicationStream); err != nil { + errCh <- err + } + }() + + // Receive a subscription from a peer + peerID := "63b60245-c475-426b-b314-4588d210859d" + sub := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + }, + }, + } + err := client.Send(sub) + require.NoError(t, err) + + runStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + // Receive subscription to my-peer-B's resources + receivedSub, err := client.Recv() + require.NoError(t, err) + + expect := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: peerID, + }, + }, + } + prototest.AssertDeepEqual(t, expect, receivedSub) + + runStep(t, "terminate the stream", func(t *testing.T) { + done := srv.ConnectedStreams()[peerID] + close(done) + + retry.Run(t, func(r *retry.R) { + _, ok := srv.StreamStatus(peerID) + require.False(r, ok) + }) + }) + + receivedTerm, err := client.Recv() + require.NoError(t, err) + expect = &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Terminated_{ + Terminated: &pbpeering.ReplicationMessage_Terminated{}, + }, + } + prototest.AssertDeepEqual(t, expect, receivedTerm) +} + +func TestStreamResources_Server_StreamTracker(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + srv := NewService(testutil.Logger(t), &testStreamBackend{ + store: store, + pub: publisher, + }) + + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + srv.streams.timeNow = it.Now + + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + go func() { + errCh <- srv.StreamResources(client.replicationStream) + }() + + peerID := "63b60245-c475-426b-b314-4588d210859d" + sub := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + }, + }, + } + err := client.Send(sub) + require.NoError(t, err) + + runStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + runStep(t, "client receives initial subscription", func(t *testing.T) { + ack, err := client.Recv() + require.NoError(t, err) + + expectAck := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: peerID, + Nonce: "", + }, + }, + } + prototest.AssertDeepEqual(t, expectAck, ack) + }) + + var sequence uint64 + var lastSendSuccess time.Time + + runStep(t, "ack tracked as success", func(t *testing.T) { + ack := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + + // Acks do not have an Error populated in the request + }, + }, + } + err := client.Send(ack) + require.NoError(t, err) + sequence++ + + lastSendSuccess = 
it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + var lastNack time.Time + var lastNackMsg string + + runStep(t, "nack tracked as error", func(t *testing.T) { + nack := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + Nonce: "2", + Error: &pbstatus.Status{ + Code: int32(code.Code_UNAVAILABLE), + Message: "bad bad not good", + }, + }, + }, + } + err := client.Send(nack) + require.NoError(t, err) + sequence++ + + lastNackMsg = "client peer was unable to apply resource: bad bad not good" + lastNack = it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + var lastRecvSuccess time.Time + + runStep(t, "response applied locally", func(t *testing.T) { + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api", + Nonce: "21", + Operation: pbpeering.ReplicationMessage_Response_UPSERT, + }, + }, + } + err := client.Send(resp) + require.NoError(t, err) + sequence++ + + ack, err := client.Recv() + require.NoError(t, err) + + expectAck := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "21", + }, + }, + } + prototest.AssertDeepEqual(t, expectAck, ack) + + lastRecvSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastReceiveSuccess: lastRecvSuccess, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + var lastRecvError time.Time + var lastRecvErrorMsg string + + runStep(t, "response fails to apply locally", func(t *testing.T) { + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "web", + Nonce: "24", + + // Unknown operation gets NACKed + Operation: pbpeering.ReplicationMessage_Response_Unknown, + }, + }, + } + err := client.Send(resp) + require.NoError(t, err) + sequence++ + + ack, err := client.Recv() + require.NoError(t, err) + + expectNack := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "24", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `unsupported operation: "Unknown"`, + }, + }, + }, + } + prototest.AssertDeepEqual(t, expectNack, ack) + + lastRecvError = it.base.Add(time.Duration(sequence) * time.Second).UTC() + lastRecvErrorMsg = `unsupported operation: "Unknown"` + + expect := StreamStatus{ + Connected: true, + 
LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastReceiveSuccess: lastRecvSuccess, + LastReceiveError: lastRecvError, + LastReceiveErrorMessage: lastRecvErrorMsg, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + runStep(t, "client disconnect marks stream as disconnected", func(t *testing.T) { + client.Close() + + sequence++ + lastRecvError := it.base.Add(time.Duration(sequence) * time.Second).UTC() + + sequence++ + disconnectTime := it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: false, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + DisconnectTime: disconnectTime, + LastReceiveSuccess: lastRecvSuccess, + LastReceiveErrorMessage: io.EOF.Error(), + LastReceiveError: lastRecvError, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + select { + case err := <-errCh: + // Client disconnect is not an error, but should make the handler return. + require.NoError(t, err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("timed out waiting for handler to finish") + } +} + +func TestStreamResources_Server_ServiceUpdates(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + // Create a peering + var lastIdx uint64 = 1 + err := store.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + _, p, err := store.PeeringRead(nil, state.Query{Value: "my-peering"}) + require.NoError(t, err) + require.NotNil(t, p) + + srv := NewService(testutil.Logger(t), &testStreamBackend{ + store: store, + pub: publisher, + }) + + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + client.errCh = errCh + + go func() { + // Pass errors from server handler into errCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. 
+ if err := srv.StreamResources(client.replicationStream); err != nil { + errCh <- err + } + }() + + // Issue a services subscription to server + init := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: p.ID, + ResourceURL: pbpeering.TypeURLService, + }, + }, + } + require.NoError(t, client.Send(init)) + + // Receive a services subscription from server + receivedSub, err := client.Recv() + require.NoError(t, err) + + expect := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: p.ID, + }, + }, + } + prototest.AssertDeepEqual(t, expect, receivedSub) + + // Register a service that is not yet exported + mysql := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "foo", Address: "10.0.0.1"}, + Service: &structs.NodeService{ID: "mysql-1", Service: "mysql", Port: 5000}, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "foo", mysql.Service)) + + runStep(t, "exporting mysql leads to an UPSERT event", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + // Mongo does not get pushed because it does not have instances registered. + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_UPSERT, msg.GetResponse().Operation) + require.Equal(r, mysql.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + + var nodes pbservice.IndexedCheckServiceNodes + require.NoError(r, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + require.Len(r, nodes.Nodes, 1) + }) + }) + + mongo := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "zip", Address: "10.0.0.3"}, + Service: &structs.NodeService{ID: "mongo-1", Service: "mongo", Port: 5000}, + } + + runStep(t, "registering mongo instance leads to an UPSERT event", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mongo.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "zip", mongo.Service)) + + retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_UPSERT, msg.GetResponse().Operation) + require.Equal(r, mongo.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + + var nodes pbservice.IndexedCheckServiceNodes + require.NoError(r, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + require.Len(r, nodes.Nodes, 1) + }) + }) + + runStep(t, "un-exporting mysql leads to a DELETE event for mysql", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + retry.Run(t, 
func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_DELETE, msg.GetResponse().Operation) + require.Equal(r, mysql.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + require.Nil(r, msg.GetResponse().Resource) + }) + }) + + runStep(t, "deleting the config entry leads to a DELETE event for mongo", func(t *testing.T) { + lastIdx++ + err = store.DeleteConfigEntry(lastIdx, structs.ExportedServices, "default", nil) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_DELETE, msg.GetResponse().Operation) + require.Equal(r, mongo.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + require.Nil(r, msg.GetResponse().Resource) + }) + }) +} + +type testStreamBackend struct { + pub state.EventPublisher + store *state.Store +} + +func (b *testStreamBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) { + return b.pub.Subscribe(req) +} + +func (b *testStreamBackend) Store() Store { + return b.store +} + +func (b *testStreamBackend) Forward(info structs.RPCInfo, f func(conn *grpc.ClientConn) error) (handled bool, err error) { + return true, nil +} + +func (b *testStreamBackend) GetAgentCACertificates() ([]string, error) { + return []string{}, nil +} + +func (b *testStreamBackend) GetServerAddresses() ([]string, error) { + return []string{}, nil +} + +func (b *testStreamBackend) GetServerName() string { + return "" +} + +func (b *testStreamBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) { + return nil, nil +} + +func (b *testStreamBackend) DecodeToken([]byte) (*structs.PeeringToken, error) { + return nil, nil +} + +func (b *testStreamBackend) EnterpriseCheckPartitions(partition string) error { + return nil +} + +func (b *testStreamBackend) Apply() Apply { + return nil +} + +func Test_processResponse(t *testing.T) { + type testCase struct { + name string + in *pbpeering.ReplicationMessage_Response + expect *pbpeering.ReplicationMessage + wantErr bool + } + + run := func(t *testing.T, tc testCase) { + reply, err := processResponse(tc.in) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.expect, reply) + } + + tt := []testCase{ + { + name: "valid upsert", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api", + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_UPSERT, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + }, + }, + }, + wantErr: false, + }, + { + name: "valid delete", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api", + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_DELETE, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + }, + }, + }, + wantErr: false, + }, + { + name: "invalid resource url", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: "nomad.Job", + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_Unknown, + }, + expect: 
&pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: "nomad.Job", + Nonce: "1", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `received response for unknown resource type "nomad.Job"`, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "unknown operation", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_Unknown, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `unsupported operation: "Unknown"`, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "out of range operation", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_Operation(100000), + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `unsupported operation: "100000"`, + }, + }, + }, + }, + wantErr: true, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/agent/rpc/peering/stream_tracker.go b/agent/rpc/peering/stream_tracker.go new file mode 100644 index 000000000..af2cbe1c2 --- /dev/null +++ b/agent/rpc/peering/stream_tracker.go @@ -0,0 +1,212 @@ +package peering + +import ( + "fmt" + "sync" + "time" +) + +// streamTracker contains a map of (PeerID -> StreamStatus). +// As streams are opened and closed we track details about their status. +type streamTracker struct { + mu sync.RWMutex + streams map[string]*lockableStreamStatus + + // timeNow is a shim for testing. + timeNow func() time.Time +} + +func newStreamTracker() *streamTracker { + return &streamTracker{ + streams: make(map[string]*lockableStreamStatus), + timeNow: time.Now, + } +} + +// connected registers a stream for a given peer, and marks it as connected. +// It also enforces that there is only one active stream for a peer. +func (t *streamTracker) connected(id string) (*lockableStreamStatus, error) { + t.mu.Lock() + defer t.mu.Unlock() + + status, ok := t.streams[id] + if !ok { + status = newLockableStreamStatus(t.timeNow) + t.streams[id] = status + return status, nil + } + + if status.connected() { + return nil, fmt.Errorf("there is an active stream for the given PeerID %q", id) + } + status.trackConnected() + + return status, nil +} + +// disconnected ensures that if a peer id's stream status is tracked, it is marked as disconnected. 
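+// It is a no-op for peer IDs that are not tracked.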
+func (t *streamTracker) disconnected(id string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if status, ok := t.streams[id]; ok {
+		status.trackDisconnected()
+	}
+}
+
+func (t *streamTracker) streamStatus(id string) (resp StreamStatus, found bool) {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+
+	s, ok := t.streams[id]
+	if !ok {
+		return StreamStatus{}, false
+	}
+	return s.status(), true
+}
+
+func (t *streamTracker) connectedStreams() map[string]chan struct{} {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+
+	resp := make(map[string]chan struct{})
+	for peer, status := range t.streams {
+		if status.connected() {
+			resp[peer] = status.doneCh
+		}
+	}
+	return resp
+}
+
+func (t *streamTracker) deleteStatus(id string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	delete(t.streams, id)
+}
+
+type lockableStreamStatus struct {
+	mu sync.RWMutex
+
+	// timeNow is a shim for testing.
+	timeNow func() time.Time
+
+	// doneCh allows for shutting down a stream gracefully by sending a termination message
+	// to the peer before the stream's context is cancelled.
+	doneCh chan struct{}
+
+	StreamStatus
+}
+
+// StreamStatus contains information about the replication stream to a peer cluster.
+// TODO(peering): There's a lot of fields here...
+type StreamStatus struct {
+	// Connected is true when there is an open stream for the peer.
+	Connected bool
+
+	// If the status is not connected, DisconnectTime tracks when the stream was closed. Else it's zero.
+	DisconnectTime time.Time
+
+	// LastAck tracks the time we received the last ACK for a resource replicated TO the peer.
+	LastAck time.Time
+
+	// LastNack tracks the time we received the last NACK for a resource replicated to the peer.
+	LastNack time.Time
+
+	// LastNackMessage tracks the reported error message associated with the last NACK from a peer.
+	LastNackMessage string
+
+	// LastSendError tracks the time of the last error sending into the stream.
+	LastSendError time.Time
+
+	// LastSendErrorMessage tracks the last error message when sending into the stream.
+	LastSendErrorMessage string
+
+	// LastReceiveSuccess tracks the time we last successfully stored a resource replicated FROM the peer.
+	LastReceiveSuccess time.Time
+
+	// LastReceiveError tracks either:
+	// - The time we failed to store a resource replicated FROM the peer.
+	// - The time of the last error when receiving from the stream.
+	LastReceiveError time.Time
+
+	// LastReceiveErrorMessage tracks either:
+	// - The error message when we failed to store a resource replicated FROM the peer.
+	// - The last error message when receiving from the stream.
+ LastReceiveErrorMessage string +} + +func newLockableStreamStatus(now func() time.Time) *lockableStreamStatus { + return &lockableStreamStatus{ + StreamStatus: StreamStatus{ + Connected: true, + }, + timeNow: now, + doneCh: make(chan struct{}), + } +} + +func (s *lockableStreamStatus) trackAck() { + s.mu.Lock() + s.LastAck = s.timeNow().UTC() + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackSendError(error string) { + s.mu.Lock() + s.LastSendError = s.timeNow().UTC() + s.LastSendErrorMessage = error + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackReceiveSuccess() { + s.mu.Lock() + s.LastReceiveSuccess = s.timeNow().UTC() + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackReceiveError(error string) { + s.mu.Lock() + s.LastReceiveError = s.timeNow().UTC() + s.LastReceiveErrorMessage = error + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackNack(msg string) { + s.mu.Lock() + s.LastNack = s.timeNow().UTC() + s.LastNackMessage = msg + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackConnected() { + s.mu.Lock() + s.Connected = true + s.DisconnectTime = time.Time{} + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackDisconnected() { + s.mu.Lock() + s.Connected = false + s.DisconnectTime = s.timeNow().UTC() + s.mu.Unlock() +} + +func (s *lockableStreamStatus) connected() bool { + var resp bool + + s.mu.RLock() + resp = s.Connected + s.mu.RUnlock() + + return resp +} + +func (s *lockableStreamStatus) status() StreamStatus { + s.mu.RLock() + copy := s.StreamStatus + s.mu.RUnlock() + + return copy +} diff --git a/agent/rpc/peering/stream_tracker_test.go b/agent/rpc/peering/stream_tracker_test.go new file mode 100644 index 000000000..2c055865b --- /dev/null +++ b/agent/rpc/peering/stream_tracker_test.go @@ -0,0 +1,162 @@ +package peering + +import ( + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestStreamTracker_EnsureConnectedDisconnected(t *testing.T) { + tracker := newStreamTracker() + peerID := "63b60245-c475-426b-b314-4588d210859d" + + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + tracker.timeNow = it.Now + + var ( + statusPtr *lockableStreamStatus + err error + ) + + runStep(t, "new stream", func(t *testing.T) { + statusPtr, err = tracker.connected(peerID) + require.NoError(t, err) + + expect := StreamStatus{ + Connected: true, + } + + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + require.Equal(t, expect, status) + }) + + runStep(t, "duplicate gets rejected", func(t *testing.T) { + _, err := tracker.connected(peerID) + require.Error(t, err) + require.Contains(t, err.Error(), `there is an active stream for the given PeerID "63b60245-c475-426b-b314-4588d210859d"`) + }) + + var sequence uint64 + var lastSuccess time.Time + + runStep(t, "stream updated", func(t *testing.T) { + statusPtr.trackAck() + sequence++ + + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + + lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() + expect := StreamStatus{ + Connected: true, + LastAck: lastSuccess, + } + require.Equal(t, expect, status) + }) + + runStep(t, "disconnect", func(t *testing.T) { + tracker.disconnected(peerID) + sequence++ + + expect := StreamStatus{ + Connected: false, + DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), + LastAck: lastSuccess, + } + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + require.Equal(t, expect, status) + }) + + runStep(t, "re-connect", func(t 
*testing.T) {
+		_, err := tracker.connected(peerID)
+		require.NoError(t, err)
+
+		expect := StreamStatus{
+			Connected: true,
+			LastAck:   lastSuccess,
+
+			// DisconnectTime gets cleared on re-connect.
+		}
+
+		status, ok := tracker.streamStatus(peerID)
+		require.True(t, ok)
+		require.Equal(t, expect, status)
+	})
+
+	runStep(t, "delete", func(t *testing.T) {
+		tracker.deleteStatus(peerID)
+
+		status, ok := tracker.streamStatus(peerID)
+		require.False(t, ok)
+		require.Zero(t, status)
+	})
+}
+
+func TestStreamTracker_connectedStreams(t *testing.T) {
+	type testCase struct {
+		name   string
+		setup  func(t *testing.T, s *streamTracker)
+		expect []string
+	}
+
+	run := func(t *testing.T, tc testCase) {
+		tracker := newStreamTracker()
+		if tc.setup != nil {
+			tc.setup(t, tracker)
+		}
+
+		streams := tracker.connectedStreams()
+
+		var keys []string
+		for key := range streams {
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+
+		require.Equal(t, tc.expect, keys)
+	}
+
+	tt := []testCase{
+		{
+			name:   "no streams",
+			expect: nil,
+		},
+		{
+			name: "all streams active",
+			setup: func(t *testing.T, s *streamTracker) {
+				_, err := s.connected("foo")
+				require.NoError(t, err)
+
+				_, err = s.connected("bar")
+				require.NoError(t, err)
+			},
+			expect: []string{"bar", "foo"},
+		},
+		{
+			name: "mixed active and inactive",
+			setup: func(t *testing.T, s *streamTracker) {
+				status, err := s.connected("foo")
+				require.NoError(t, err)
+
+				// Mark foo as disconnected to avoid showing it as an active stream
+				status.trackDisconnected()
+
+				_, err = s.connected("bar")
+				require.NoError(t, err)
+			},
+			expect: []string{"bar"},
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			run(t, tc)
+		})
+	}
+}
diff --git a/agent/rpc/peering/subscription_manager.go b/agent/rpc/peering/subscription_manager.go
new file mode 100644
index 000000000..bd90168d7
--- /dev/null
+++ b/agent/rpc/peering/subscription_manager.go
@@ -0,0 +1,149 @@
+package peering
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-memdb"
+
+	"github.com/hashicorp/consul/agent/cache"
+	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/submatview"
+	"github.com/hashicorp/consul/lib/retry"
+	"github.com/hashicorp/consul/proto/pbservice"
+)
+
+type MaterializedViewStore interface {
+	Get(ctx context.Context, req submatview.Request) (submatview.Result, error)
+	Notify(ctx context.Context, req submatview.Request, cID string, ch chan<- cache.UpdateEvent) error
+}
+
+type SubscriptionBackend interface {
+	Subscriber
+	Store() Store
+}
+
+// subscriptionManager handles requests to subscribe to events from an event publisher.
+type subscriptionManager struct {
+	logger    hclog.Logger
+	viewStore MaterializedViewStore
+	backend   SubscriptionBackend
+
+	// watchedServices is a map of exported services to a cancel function for their subscription notifier.
+	watchedServices map[structs.ServiceName]context.CancelFunc
+}
+
+// TODO(peering): Maybe centralize so that there is a single manager per datacenter, rather than per peering.
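+// newSubscriptionManager creates a manager backed by a materialized-view store
+// whose run loop is tied to the lifetime of ctx.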
+func newSubscriptionManager(ctx context.Context, logger hclog.Logger, backend SubscriptionBackend) *subscriptionManager {
+	logger = logger.Named("subscriptions")
+	store := submatview.NewStore(logger.Named("viewstore"))
+	go store.Run(ctx)
+
+	return &subscriptionManager{
+		logger:          logger,
+		viewStore:       store,
+		backend:         backend,
+		watchedServices: make(map[structs.ServiceName]context.CancelFunc),
+	}
+}
+
+// subscribe returns a channel that will contain updates to exported service instances for a given peer.
+func (m *subscriptionManager) subscribe(ctx context.Context, peerID string) <-chan cache.UpdateEvent {
+	updateCh := make(chan cache.UpdateEvent, 1)
+	go m.syncSubscriptions(ctx, peerID, updateCh)
+
+	return updateCh
+}
+
+func (m *subscriptionManager) syncSubscriptions(ctx context.Context, peerID string, updateCh chan<- cache.UpdateEvent) {
+	waiter := &retry.Waiter{
+		MinFailures: 1,
+		Factor:      500 * time.Millisecond,
+		MaxWait:     60 * time.Second,
+		Jitter:      retry.NewJitter(100),
+	}
+
+	for {
+		if err := m.syncSubscriptionsAndBlock(ctx, peerID, updateCh); err != nil {
+			m.logger.Error("failed to sync subscriptions", "error", err)
+		}
+
+		if err := waiter.Wait(ctx); err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+			m.logger.Error("failed to wait before re-trying sync", "error", err)
+		}
+
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+	}
+}
+
+// syncSubscriptionsAndBlock ensures that the subscriptions to the subscription backend
+// match the list of services exported to the peer.
+func (m *subscriptionManager) syncSubscriptionsAndBlock(ctx context.Context, peerID string, updateCh chan<- cache.UpdateEvent) error {
+	store := m.backend.Store()
+
+	ws := memdb.NewWatchSet()
+	ws.Add(store.AbandonCh())
+	ws.Add(ctx.Done())
+
+	// Get exported services for the peer ID.
+	_, services, err := store.ExportedServicesForPeer(ws, peerID)
+	if err != nil {
+		return fmt.Errorf("failed to watch exported services for peer %q: %w", peerID, err)
+	}
+
+	// seen contains the set of exported service names and is used to reconcile the list of watched services.
+	seen := make(map[structs.ServiceName]struct{})
+
+	// Ensure there is a subscription for each service exported to the peer.
+	for _, svc := range services {
+		seen[svc] = struct{}{}
+
+		if _, ok := m.watchedServices[svc]; ok {
+			// Exported service is already being watched, nothing to do.
+			continue
+		}
+
+		notifyCtx, cancel := context.WithCancel(ctx)
+		m.watchedServices[svc] = cancel
+
+		if err := m.Notify(notifyCtx, svc, updateCh); err != nil {
+			m.logger.Error("failed to subscribe to service", "service", svc.String(), "error", err)
+			continue
+		}
+	}
+
+	// For every subscription without an exported service, call the associated cancel fn
+	// and drop it from the watched set so a later re-export starts a fresh subscription.
+	for svc, cancel := range m.watchedServices {
+		if _, ok := seen[svc]; !ok {
+			cancel()
+			delete(m.watchedServices, svc)
+
+			// Send an empty event to the stream handler to trigger sending a DELETE message.
+			// Cancelling the subscription context above is necessary, but does not yield a useful signal on its own.
+			updateCh <- cache.UpdateEvent{
+				CorrelationID: subExportedService + svc.String(),
+				Result:        &pbservice.IndexedCheckServiceNodes{},
+			}
+		}
+	}
+
+	// Block until a change to the watched state is observed (or ctx is cancelled).
+	<-ws.WatchCh(ctx)
+	return nil
+}
+
+const (
+	subExportedService = "exported-service:"
+)
+
+// Notify the given channel when there are updates to the requested service.
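+// Events are delivered with a CorrelationID of subExportedService plus the service
+// name so that the stream handler can route them to pushServiceResponse.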
+func (m *subscriptionManager) Notify(ctx context.Context, svc structs.ServiceName, updateCh chan<- cache.UpdateEvent) error { + sr := newExportedServiceRequest(m.logger, svc, m.backend) + return m.viewStore.Notify(ctx, sr, subExportedService+svc.String(), updateCh) +} diff --git a/agent/rpc/peering/subscription_manager_test.go b/agent/rpc/peering/subscription_manager_test.go new file mode 100644 index 000000000..b8b06be6d --- /dev/null +++ b/agent/rpc/peering/subscription_manager_test.go @@ -0,0 +1,362 @@ +package peering + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/sdk/testutil/retry" +) + +type testSubscriptionBackend struct { + state.EventPublisher + store *state.Store +} + +func (b *testSubscriptionBackend) Store() Store { + return b.store +} + +func TestSubscriptionManager_RegisterDeregister(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + backend := testSubscriptionBackend{ + EventPublisher: publisher, + store: store, + } + + ctx := context.Background() + mgr := newSubscriptionManager(ctx, hclog.New(nil), &backend) + + // Create a peering + var lastIdx uint64 = 1 + err := store.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + _, p, err := store.PeeringRead(nil, state.Query{Value: "my-peering"}) + require.NoError(t, err) + require.NotNil(t, p) + + id := p.ID + + subCh := mgr.subscribe(ctx, id) + + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-other-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + mysql1 := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "foo", Address: "10.0.0.1"}, + Service: &structs.NodeService{ID: "mysql-1", Service: "mysql", Port: 5000}, + Checks: structs.HealthChecks{ + &structs.HealthCheck{CheckID: "mysql-check", ServiceID: "mysql-1", Node: "foo"}, + }, + } + + runStep(t, "registering exported service instance yields update", func(t *testing.T) { + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql1.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "foo", mysql1.Service)) + + lastIdx++ + require.NoError(t, store.EnsureCheck(lastIdx, mysql1.Checks[0])) + + // Receive in a retry loop so that eventually we converge onto the expected CheckServiceNode. 
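+		// The node, service, and check were written in three separate commits, so
+		// earlier events may not include all of them yet.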
+ retry.Run(t, func(r *retry.R) { + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(r, ok) + require.Equal(r, uint64(5), nodes.Index) + + require.Len(r, nodes.Nodes, 1) + require.Equal(r, "foo", nodes.Nodes[0].Node.Node) + require.Equal(r, "mysql-1", nodes.Nodes[0].Service.ID) + + require.Len(r, nodes.Nodes[0].Checks, 1) + require.Equal(r, "mysql-check", nodes.Nodes[0].Checks[0].CheckID) + + default: + r.Fatalf("invalid update") + } + }) + }) + + mysql2 := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "bar", Address: "10.0.0.2"}, + Service: &structs.NodeService{ID: "mysql-2", Service: "mysql", Port: 5000}, + Checks: structs.HealthChecks{ + &structs.HealthCheck{CheckID: "mysql-2-check", ServiceID: "mysql-2", Node: "bar"}, + }, + } + + runStep(t, "additional instances are returned when registered", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql2.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "bar", mysql2.Service)) + + lastIdx++ + require.NoError(t, store.EnsureCheck(lastIdx, mysql2.Checks[0])) + + retry.Run(t, func(r *retry.R) { + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(r, ok) + require.Equal(r, uint64(8), nodes.Index) + + require.Len(r, nodes.Nodes, 2) + require.Equal(r, "bar", nodes.Nodes[0].Node.Node) + require.Equal(r, "mysql-2", nodes.Nodes[0].Service.ID) + + require.Len(r, nodes.Nodes[0].Checks, 1) + require.Equal(r, "mysql-2-check", nodes.Nodes[0].Checks[0].CheckID) + + require.Equal(r, "foo", nodes.Nodes[1].Node.Node) + require.Equal(r, "mysql-1", nodes.Nodes[1].Service.ID) + + require.Len(r, nodes.Nodes[1].Checks, 1) + require.Equal(r, "mysql-check", nodes.Nodes[1].Checks[0].CheckID) + + default: + r.Fatalf("invalid update") + } + }) + }) + + runStep(t, "no updates are received for services not exported to my-peering", func(t *testing.T) { + mongo := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "zip", Address: "10.0.0.3"}, + Service: &structs.NodeService{ID: "mongo", Service: "mongo", Port: 5000}, + Checks: structs.HealthChecks{ + &structs.HealthCheck{CheckID: "mongo-check", ServiceID: "mongo", Node: "zip"}, + }, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mongo.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "zip", mongo.Service)) + + lastIdx++ + require.NoError(t, store.EnsureCheck(lastIdx, mongo.Checks[0])) + + // Receive from subCh times out. The retry in the last step already consumed all the mysql events. 
+ select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + + if ok && len(nodes.Nodes) > 0 && nodes.Nodes[0].Node.Node == "zip" { + t.Fatalf("received update for mongo node zip") + } + + case <-time.After(100 * time.Millisecond): + // Expect this to fire + } + }) + + runStep(t, "deregister an instance and it gets removed from the output", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.DeleteService(lastIdx, "foo", mysql1.Service.ID, nil, "")) + + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(t, ok) + require.Equal(t, uint64(12), nodes.Index) + + require.Len(t, nodes.Nodes, 1) + require.Equal(t, "bar", nodes.Nodes[0].Node.Node) + require.Equal(t, "mysql-2", nodes.Nodes[0].Service.ID) + + require.Len(t, nodes.Nodes[0].Checks, 1) + require.Equal(t, "mysql-2-check", nodes.Nodes[0].Checks[0].CheckID) + + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out waiting for update") + } + }) + + runStep(t, "deregister the last instance and the output is empty", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.DeleteService(lastIdx, "bar", mysql2.Service.ID, nil, "")) + + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(t, ok) + require.Equal(t, uint64(13), nodes.Index) + require.Len(t, nodes.Nodes, 0) + + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out waiting for update") + } + }) +} + +func TestSubscriptionManager_InitialSnapshot(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + backend := testSubscriptionBackend{ + EventPublisher: publisher, + store: store, + } + + ctx := context.Background() + mgr := newSubscriptionManager(ctx, hclog.New(nil), &backend) + + // Create a peering + var lastIdx uint64 = 1 + err := store.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + _, p, err := store.PeeringRead(nil, state.Query{Value: "my-peering"}) + require.NoError(t, err) + require.NotNil(t, p) + + id := p.ID + + subCh := mgr.subscribe(ctx, id) + + // Register two services that are not yet exported + mysql := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "foo", Address: "10.0.0.1"}, + Service: &structs.NodeService{ID: "mysql-1", Service: "mysql", Port: 5000}, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "foo", mysql.Service)) + + mongo := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "zip", Address: "10.0.0.3"}, + Service: &structs.NodeService{ID: "mongo-1", Service: "mongo", Port: 5000}, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mongo.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "zip", mongo.Service)) + + // No updates should be received, because neither service is exported. 
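+	// An update arriving here would mean a service was exported prematurely;
+	// the export step below is what should produce the initial snapshot.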
+ select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + + if ok && len(nodes.Nodes) > 0 { + t.Fatalf("received unexpected update") + } + + case <-time.After(100 * time.Millisecond): + // Expect this to fire + } + + runStep(t, "exporting the two services yields an update for both", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + var ( + sawMySQL bool + sawMongo bool + ) + + retry.Run(t, func(r *retry.R) { + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(r, ok) + require.Len(r, nodes.Nodes, 1) + + switch nodes.Nodes[0].Service.Service { + case "mongo": + sawMongo = true + case "mysql": + sawMySQL = true + } + if !sawMySQL || !sawMongo { + r.Fatalf("missing an update") + } + default: + r.Fatalf("invalid update") + } + }) + }) +} + +func newStateStore(t *testing.T, publisher *stream.EventPublisher) *state.Store { + gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) + require.NoError(t, err) + + store := state.NewStateStoreWithEventPublisher(gc, publisher) + require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealth, store.ServiceHealthSnapshot)) + require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealthConnect, store.ServiceHealthSnapshot)) + go publisher.Run(context.Background()) + + return store +} diff --git a/agent/rpc/peering/subscription_view.go b/agent/rpc/peering/subscription_view.go new file mode 100644 index 000000000..d6b48e923 --- /dev/null +++ b/agent/rpc/peering/subscription_view.go @@ -0,0 +1,141 @@ +package peering + +import ( + "fmt" + "sort" + + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +type Subscriber interface { + Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) +} + +type exportedServiceRequest struct { + logger hclog.Logger + req structs.ServiceSpecificRequest + sub Subscriber +} + +func newExportedServiceRequest(logger hclog.Logger, svc structs.ServiceName, sub Subscriber) *exportedServiceRequest { + req := structs.ServiceSpecificRequest{ + // TODO(peering): Need to subscribe to both Connect and not + Connect: false, + + ServiceName: svc.Name, + EnterpriseMeta: svc.EnterpriseMeta, + } + return &exportedServiceRequest{ + logger: logger, + req: req, + sub: sub, + } +} + +// CacheInfo implements submatview.Request +func (e *exportedServiceRequest) CacheInfo() cache.RequestInfo { + return e.req.CacheInfo() +} + +// NewMaterializer implements submatview.Request +func (e *exportedServiceRequest) NewMaterializer() (submatview.Materializer, error) { + reqFn := func(index uint64) *pbsubscribe.SubscribeRequest { + r := &pbsubscribe.SubscribeRequest{ + Topic: pbsubscribe.Topic_ServiceHealth, + Key: e.req.ServiceName, + Token: e.req.Token, + Datacenter: e.req.Datacenter, + Index: index, + Namespace: 
e.req.EnterpriseMeta.NamespaceOrEmpty(), + Partition: e.req.EnterpriseMeta.PartitionOrEmpty(), + } + if e.req.Connect { + r.Topic = pbsubscribe.Topic_ServiceHealthConnect + } + return r + } + deps := submatview.Deps{ + View: newExportedServicesView(), + Logger: e.logger, + Request: reqFn, + } + return submatview.NewLocalMaterializer(e.sub, deps), nil +} + +// Type implements submatview.Request +func (e *exportedServiceRequest) Type() string { + return "leader.peering.stream.exportedServiceRequest" +} + +// exportedServicesView implements submatview.View for storing the view state +// of an exported service's health result. We store it as a map to make updates and +// deletions a little easier but we could just store a result type +// (IndexedCheckServiceNodes) and update it in place for each event - that +// involves re-sorting each time etc. though. +// +// Unlike rpcclient.healthView, there is no need for a filter because for exported services +// we export all instances unconditionally. +type exportedServicesView struct { + state map[string]*pbservice.CheckServiceNode +} + +func newExportedServicesView() *exportedServicesView { + return &exportedServicesView{ + state: make(map[string]*pbservice.CheckServiceNode), + } +} + +// Reset implements submatview.View +func (s *exportedServicesView) Reset() { + s.state = make(map[string]*pbservice.CheckServiceNode) +} + +// Update implements submatview.View +func (s *exportedServicesView) Update(events []*pbsubscribe.Event) error { + for _, event := range events { + serviceHealth := event.GetServiceHealth() + if serviceHealth == nil { + return fmt.Errorf("unexpected event type for service health view: %T", + event.GetPayload()) + } + + id := serviceHealth.CheckServiceNode.UniqueID() + switch serviceHealth.Op { + case pbsubscribe.CatalogOp_Register: + s.state[id] = serviceHealth.CheckServiceNode + + case pbsubscribe.CatalogOp_Deregister: + delete(s.state, id) + } + } + return nil +} + +// Result returns the CheckServiceNodes stored by this view. +// Result implements submatview.View +func (s *exportedServicesView) Result(index uint64) interface{} { + result := pbservice.IndexedCheckServiceNodes{ + Nodes: make([]*pbservice.CheckServiceNode, 0, len(s.state)), + Index: index, + } + for _, node := range s.state { + result.Nodes = append(result.Nodes, node) + } + sortCheckServiceNodes(&result) + + return &result +} + +// sortCheckServiceNodes stable sorts the results to match memdb semantics. +func sortCheckServiceNodes(n *pbservice.IndexedCheckServiceNodes) { + sort.SliceStable(n.Nodes, func(i, j int) bool { + return n.Nodes[i].UniqueID() < n.Nodes[j].UniqueID() + }) +} diff --git a/agent/rpc/peering/subscription_view_test.go b/agent/rpc/peering/subscription_view_test.go new file mode 100644 index 000000000..cbb9d071f --- /dev/null +++ b/agent/rpc/peering/subscription_view_test.go @@ -0,0 +1,338 @@ +package peering + +import ( + "context" + "math/rand" + "sort" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// TestExportedServiceSubscription tests the exported services view and the backing submatview.LocalMaterializer. 
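+// It drives randomized concurrent writes through a fake state store and asserts
+// that every consumer converges on the same per-index snapshots that the
+// producer recorded.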
+func TestExportedServiceSubscription(t *testing.T) {
+	s := &stateMap{
+		states: make(map[string]*serviceState),
+	}
+
+	sh := snapshotHandler{stateMap: s}
+	pub := stream.NewEventPublisher(10 * time.Millisecond)
+	pub.RegisterHandler(pbsubscribe.Topic_ServiceHealth, sh.Snapshot)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go pub.Run(ctx)
+
+	apiSN := structs.NewServiceName("api", nil)
+	webSN := structs.NewServiceName("web", nil)
+
+	// List of updates to the state store:
+	// - api: {register api-1, register api-2, register api-3}
+	// - web: {register web-1, deregister web-1, register web-2}
+	events := []map[string]stream.Event{
+		{
+			apiSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "api-1",
+							Service: "api",
+						},
+					},
+				},
+			},
+			webSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "web-1",
+							Service: "web",
+						},
+					},
+				},
+			},
+		},
+		{
+			apiSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "api-2",
+							Service: "api",
+						},
+					},
+				},
+			},
+			webSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Deregister,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "web-1",
+							Service: "web",
+						},
+					},
+				},
+			},
+		},
+		{
+			apiSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "api-3",
+							Service: "api",
+						},
+					},
+				},
+			},
+			webSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "web-2",
+							Service: "web",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// store represents Consul's memdb state store; it publishes a stream of
+	// event updates as it changes.
+	store := store{stateMap: s, pub: pub}
+
+	// This errgroup is used to issue simulated async updates to the state store,
+	// and also to consume that fixed number of updates.
+	group, gctx := errgroup.WithContext(ctx)
+	group.Go(func() error {
+		store.simulateUpdates(gctx, events)
+		return nil
+	})
+
+	// viewStore is the store shared by the two service consumers' materializers.
+	// It is intentionally not run in the errgroup because it will block until the context is canceled.
+	viewStore := submatview.NewStore(hclog.New(nil))
+	go viewStore.Run(ctx)
+
+	// Each consumer represents a subscriber to exported service updates, and will consume
+	// stream events for the service name it is interested in.
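+	// Both consumers share viewStore, which (by assumption about submatview.Store
+	// internals) deduplicates materializers by request cache key, so the "api" and
+	// "web" consumers each get their own view here.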
+	consumers := make(map[string]*consumer)
+	for _, svc := range []structs.ServiceName{apiSN, webSN} {
+		c := &consumer{
+			viewStore:   viewStore,
+			publisher:   pub,
+			seenByIndex: make(map[uint64][]string),
+		}
+		service := svc
+		group.Go(func() error {
+			return c.consume(gctx, service.Name, len(events))
+		})
+		consumers[service.String()] = c
+	}
+
+	// Wait until all the events have been simulated and consumed.
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		_ = group.Wait()
+	}()
+
+	select {
+	case <-done:
+		// finished
+	case <-time.After(500 * time.Millisecond):
+		// timed out; the Wait context will be cancelled by the deferred cancel()
+		t.Fatalf("timed out waiting for producers and consumers")
+	}
+
+	for svc, c := range consumers {
+		require.NotEmpty(t, c.seenByIndex)
+
+		// Note that store.states[svc].idsByIndex does not assert against a slice of expectations
+		// because the index at which each event arrives in the simulation is not deterministic.
+		require.Equal(t, store.states[svc].idsByIndex, c.seenByIndex)
+	}
+}
+
+// stateMap maps a service name to the state of the store at different indexes.
+type stateMap struct {
+	mu     sync.Mutex
+	states map[string]*serviceState
+}
+
+type store struct {
+	*stateMap
+
+	pub *stream.EventPublisher
+}
+
+// simulateUpdates will publish events and also store the state at each index for later assertions.
+func (s *store) simulateUpdates(ctx context.Context, events []map[string]stream.Event) {
+	idx := uint64(0)
+
+	for _, m := range events {
+		if ctx.Err() != nil {
+			return
+		}
+
+		for svc, event := range m {
+			idx++
+			event.Index = idx
+			s.pub.Publish([]stream.Event{event})
+
+			s.stateMap.mu.Lock()
+			svcState, ok := s.states[svc]
+			if !ok {
+				svcState = &serviceState{
+					current:    make(map[string]*structs.CheckServiceNode),
+					idsByIndex: make(map[uint64][]string),
+				}
+				s.states[svc] = svcState
+			}
+			s.stateMap.mu.Unlock()
+
+			svcState.mu.Lock()
+			svcState.idx = idx
+
+			// Updating the svcState.current map allows us to capture snapshots from a stream of add/delete events.
+			payload := event.Payload.(state.EventPayloadCheckServiceNode)
+			switch payload.Op {
+			case pbsubscribe.CatalogOp_Register:
+				svcState.current[payload.Value.Service.ID] = payload.Value
+			default:
+				// If not a registration it must be a deregistration:
+				delete(svcState.current, payload.Value.Service.ID)
+			}
+
+			svcState.idsByIndex[idx] = serviceIDsFromMap(svcState.current)
+			svcState.mu.Unlock()
+
+			delay := time.Duration(rand.Intn(25)) * time.Millisecond
+			time.Sleep(5*time.Millisecond + delay)
+		}
+	}
+}
+
+func serviceIDsFromMap(m map[string]*structs.CheckServiceNode) []string {
+	var result []string
+	for id := range m {
+		result = append(result, id)
+	}
+	sort.Strings(result)
+	return result
+}
+
+type snapshotHandler struct {
+	*stateMap
+}
+
+type serviceState struct {
+	mu  sync.Mutex
+	idx uint64
+
+	// The current snapshot of data, given the observed events.
+	current map[string]*structs.CheckServiceNode
+
+	// The list of service IDs seen at each index at which an update was received for the given service name.
+	idsByIndex map[uint64][]string
+}
+
+// Snapshot dumps the currently registered service instances.
+//
+// Snapshot implements stream.SnapshotFunc.
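+//
+// The returned index pairs with the appended events so that a new subscriber
+// gets a consistent baseline before receiving deltas (assumed contract of
+// stream.SnapshotFunc).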
+func (s *snapshotHandler) Snapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { + s.stateMap.mu.Lock() + svcState, ok := s.states[req.Subject.String()] + if !ok { + svcState = &serviceState{ + current: make(map[string]*structs.CheckServiceNode), + idsByIndex: make(map[uint64][]string), + } + s.states[req.Subject.String()] = svcState + } + s.stateMap.mu.Unlock() + + svcState.mu.Lock() + defer svcState.mu.Unlock() + + for _, node := range svcState.current { + event := stream.Event{ + Topic: pbsubscribe.Topic_ServiceHealth, + Index: svcState.idx, + Payload: state.EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Register, + Value: node, + }, + } + buf.Append([]stream.Event{event}) + } + return svcState.idx, nil +} + +type consumer struct { + viewStore *submatview.Store + publisher *stream.EventPublisher + seenByIndex map[uint64][]string +} + +func (c *consumer) consume(ctx context.Context, service string, countExpected int) error { + group, gctx := errgroup.WithContext(ctx) + updateCh := make(chan cache.UpdateEvent, 10) + + group.Go(func() error { + sr := newExportedServiceRequest(hclog.New(nil), structs.NewServiceName(service, nil), c.publisher) + return c.viewStore.Notify(gctx, sr, "", updateCh) + }) + group.Go(func() error { + var n int + for { + if n >= countExpected { + return nil + } + select { + case u := <-updateCh: + // Each update contains the current snapshot of registered services. + c.seenByIndex[u.Meta.Index] = serviceIDsFromUpdates(u) + n++ + + case <-gctx.Done(): + return nil + } + } + }) + return group.Wait() +} + +func serviceIDsFromUpdates(u cache.UpdateEvent) []string { + var result []string + for _, node := range u.Result.(*pbservice.IndexedCheckServiceNodes).Nodes { + result = append(result, node.Service.ID) + } + sort.Strings(result) + return result +} diff --git a/agent/rpc/peering/testing.go b/agent/rpc/peering/testing.go new file mode 100644 index 000000000..ffa24ea71 --- /dev/null +++ b/agent/rpc/peering/testing.go @@ -0,0 +1,199 @@ +package peering + +import ( + "context" + "io" + "sync" + "testing" + "time" + + "google.golang.org/grpc/metadata" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +// same certificate that appears in our connect tests +var validCA = ` +-----BEGIN CERTIFICATE----- +MIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg +Q0EgNzAeFw0xODA1MjExNjMzMjhaFw0yODA1MTgxNjMzMjhaMBYxFDASBgNVBAMT +C0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAER0qlxjnRcMEr +iSGlH7G7dYU7lzBEmLUSMZkyBbClmyV8+e8WANemjn+PLnCr40If9cmpr7RnC9Qk +GTaLnLiF16OCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/ +MGgGA1UdDgRhBF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1OTpjMjpmYTo0ZTo3 +NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToyNDpiMDowNDpiMzpl +ODo5Nzo1Yjo3ZTBqBgNVHSMEYzBhgF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1 +OTpjMjpmYTo0ZTo3NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToy +NDpiMDowNDpiMzplODo5Nzo1Yjo3ZTA/BgNVHREEODA2hjRzcGlmZmU6Ly8xMjRk +ZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIuY29uc3VsMD0GA1UdHgEB +/wQzMDGgLzAtgisxMjRkZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIu +Y29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIQDzkkI7R+0U12a+zq2EQhP/n2mHmta+ +fs2hBxWIELGwTAIgLdO7RRw+z9nnxCIA6kNl//mIQb+PGItespiHZKAz74Q= +-----END CERTIFICATE----- +` +var invalidCA = ` +-----BEGIN CERTIFICATE----- +not valid +-----END CERTIFICATE----- +` + +var validAddress = "1.2.3.4:80" + +var validServerName = "server.consul" + +var validPeerID = "peer1" + +// TODO(peering): the test methods below are 
exposed to prevent duplication,
+// these should be removed at the same time the tests in peering_test get refactored.
+// XXX: we can't put the existing tests in service_test.go into the peering
+// package because it causes an import cycle by importing the top-level consul
+// package (which correctly imports the agent/rpc/peering package)

+// TestPeering is a test utility for generating a pbpeering.Peering with valid
+// data for the given peerName and state.
+func TestPeering(peerName string, state pbpeering.PeeringState) *pbpeering.Peering {
+	return &pbpeering.Peering{
+		Name:                peerName,
+		PeerCAPems:          []string{validCA},
+		PeerServerAddresses: []string{validAddress},
+		PeerServerName:      validServerName,
+		State:               state,
+		// uncomment once #1613 lands
+		// PeerID: validPeerID
+	}
+}
+
+// TestPeeringToken is a test utility for generating a valid peering token
+// with the given peerID for use in test cases.
+func TestPeeringToken(peerID string) structs.PeeringToken {
+	return structs.PeeringToken{
+		CA:              []string{validCA},
+		ServerAddresses: []string{validAddress},
+		ServerName:      validServerName,
+		PeerID:          peerID,
+	}
+}
+
+type mockClient struct {
+	mu    sync.Mutex
+	errCh chan error
+
+	replicationStream *mockStream
+}
+
+func (c *mockClient) Send(r *pbpeering.ReplicationMessage) error {
+	c.replicationStream.recvCh <- r
+	return nil
+}
+
+func (c *mockClient) Recv() (*pbpeering.ReplicationMessage, error) {
+	select {
+	case err := <-c.errCh:
+		return nil, err
+	case r := <-c.replicationStream.sendCh:
+		return r, nil
+	case <-time.After(10 * time.Millisecond):
+		return nil, io.EOF
+	}
+}
+
+func (c *mockClient) RecvWithTimeout(dur time.Duration) (*pbpeering.ReplicationMessage, error) {
+	select {
+	case err := <-c.errCh:
+		return nil, err
+	case r := <-c.replicationStream.sendCh:
+		return r, nil
+	case <-time.After(dur):
+		return nil, io.EOF
+	}
+}
+
+func (c *mockClient) Close() {
+	close(c.replicationStream.recvCh)
+}
+
+func newMockClient(ctx context.Context) *mockClient {
+	return &mockClient{
+		replicationStream: newTestReplicationStream(ctx),
+	}
+}
+
+// mockStream mocks peering.PeeringService_StreamResourcesServer
+type mockStream struct {
+	sendCh chan *pbpeering.ReplicationMessage
+	recvCh chan *pbpeering.ReplicationMessage
+
+	ctx context.Context
+	mu  sync.Mutex
+}
+
+var _ pbpeering.PeeringService_StreamResourcesServer = (*mockStream)(nil)
+
+func newTestReplicationStream(ctx context.Context) *mockStream {
+	return &mockStream{
+		sendCh: make(chan *pbpeering.ReplicationMessage, 1),
+		recvCh: make(chan *pbpeering.ReplicationMessage, 1),
+		ctx:    ctx,
+	}
+}
+
+// Send implements pbpeering.PeeringService_StreamResourcesServer
+func (s *mockStream) Send(r *pbpeering.ReplicationMessage) error {
+	s.sendCh <- r
+	return nil
+}
+
+// Recv implements pbpeering.PeeringService_StreamResourcesServer
+func (s *mockStream) Recv() (*pbpeering.ReplicationMessage, error) {
+	r := <-s.recvCh
+	if r == nil {
+		return nil, io.EOF
+	}
+	return r, nil
+}
+
+// Context implements grpc.ServerStream and grpc.ClientStream
+func (s *mockStream) Context() context.Context {
+	return s.ctx
+}
+
+// SendMsg implements grpc.ServerStream and grpc.ClientStream
+func (s *mockStream) SendMsg(m interface{}) error {
+	return nil
+}
+
+// RecvMsg implements grpc.ServerStream and grpc.ClientStream
+func (s *mockStream) RecvMsg(m interface{}) error {
+	return nil
+}
+
+// SetHeader implements grpc.ServerStream
+func (s *mockStream) SetHeader(metadata.MD) error {
+	return nil
+}
+
+// SendHeader implements
grpc.ServerStream +func (s *mockStream) SendHeader(metadata.MD) error { + return nil +} + +// SetTrailer implements grpc.ServerStream +func (s *mockStream) SetTrailer(metadata.MD) {} + +type incrementalTime struct { + base time.Time + next uint64 +} + +func (t *incrementalTime) Now() time.Time { + t.next++ + return t.base.Add(time.Duration(t.next) * time.Second) +} + +func runStep(t *testing.T, name string, fn func(t *testing.T)) { + t.Helper() + if !t.Run(name, fn) { + t.FailNow() + } +} diff --git a/agent/rpc/peering/testutil_oss_test.go b/agent/rpc/peering/testutil_oss_test.go new file mode 100644 index 000000000..4aac92cad --- /dev/null +++ b/agent/rpc/peering/testutil_oss_test.go @@ -0,0 +1,16 @@ +//go:build !consulent +// +build !consulent + +package peering_test + +import ( + "testing" + + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/go-hclog" +) + +func newDefaultDepsEnterprise(t *testing.T, logger hclog.Logger, c *consul.Config) consul.EnterpriseDeps { + t.Helper() + return consul.EnterpriseDeps{} +} diff --git a/agent/rpc/peering/validate.go b/agent/rpc/peering/validate.go new file mode 100644 index 000000000..466b61a6e --- /dev/null +++ b/agent/rpc/peering/validate.go @@ -0,0 +1,62 @@ +package peering + +import ( + "fmt" + "net" + "strconv" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + + // TODO: replace this with net/netip when we upgrade to go1.18 + "inet.af/netaddr" +) + +// validatePeeringToken ensures that the token has valid values. +func validatePeeringToken(tok *structs.PeeringToken) error { + if len(tok.CA) == 0 { + return errPeeringTokenEmptyCA + } + + // the CA values here should be valid x509 certs + for _, certStr := range tok.CA { + // TODO(peering): should we put these in a cert pool on the token? + // maybe there's a better place to do the parsing? + if _, err := connect.ParseCert(certStr); err != nil { + return fmt.Errorf("peering token invalid CA: %w", err) + } + } + + if len(tok.ServerAddresses) == 0 { + return errPeeringTokenEmptyServerAddresses + } + for _, addr := range tok.ServerAddresses { + host, portRaw, err := net.SplitHostPort(addr) + if err != nil { + return &errPeeringInvalidServerAddress{addr} + } + + port, err := strconv.Atoi(portRaw) + if err != nil { + return &errPeeringInvalidServerAddress{addr} + } + if port < 1 || port > 65535 { + return &errPeeringInvalidServerAddress{addr} + } + if _, err := netaddr.ParseIP(host); err != nil { + return &errPeeringInvalidServerAddress{addr} + } + } + + // TODO(peering): validate name matches SNI? + // TODO(peering): validate name well formed? 
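+	// A token that passes every check in this function (values mirrored from
+	// testing.go) would look like:
+	//
+	//	structs.PeeringToken{
+	//		CA:              []string{validCA},
+	//		ServerAddresses: []string{"1.2.3.4:80"},
+	//		ServerName:      "server.consul",
+	//		PeerID:          "peer1",
+	//	}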
+ if tok.ServerName == "" { + return errPeeringTokenEmptyServerName + } + + if tok.PeerID == "" { + return errPeeringTokenEmptyPeerID + } + + return nil +} diff --git a/agent/rpc/peering/validate_test.go b/agent/rpc/peering/validate_test.go new file mode 100644 index 000000000..e3b1cbf7d --- /dev/null +++ b/agent/rpc/peering/validate_test.go @@ -0,0 +1,107 @@ +package peering + +import ( + "errors" + "testing" + + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/require" +) + +func TestValidatePeeringToken(t *testing.T) { + type testCase struct { + name string + token *structs.PeeringToken + wantErr error + } + + tt := []testCase{ + { + name: "empty", + token: &structs.PeeringToken{}, + wantErr: errPeeringTokenEmptyCA, + }, + { + name: "empty CA", + token: &structs.PeeringToken{ + CA: []string{}, + }, + wantErr: errPeeringTokenEmptyCA, + }, + { + name: "invalid CA", + token: &structs.PeeringToken{ + CA: []string{"notavalidcert"}, + }, + wantErr: errors.New("peering token invalid CA: no PEM-encoded data found"), + }, + { + name: "invalid CA cert", + token: &structs.PeeringToken{ + CA: []string{invalidCA}, + }, + wantErr: errors.New("peering token invalid CA: x509: malformed certificate"), + }, + { + name: "invalid address port", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{"1.2.3.4"}, + }, + wantErr: &errPeeringInvalidServerAddress{ + "1.2.3.4", + }, + }, + { + name: "invalid address IP", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{"foo.bar.baz"}, + }, + wantErr: &errPeeringInvalidServerAddress{ + "foo.bar.baz", + }, + }, + { + name: "invalid server name", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{"1.2.3.4:80"}, + }, + wantErr: errPeeringTokenEmptyServerName, + }, + { + name: "invalid peer ID", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{validAddress}, + ServerName: validServerName, + }, + wantErr: errPeeringTokenEmptyPeerID, + }, + { + name: "valid token", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{validAddress}, + ServerName: validServerName, + PeerID: validPeerID, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + err := validatePeeringToken(tc.token) + if tc.wantErr != nil { + if err == nil { + t.Error("expected error but got nil") + return + } + require.Contains(t, err.Error(), tc.wantErr.Error()) + return + } + require.NoError(t, err) + }) + } +} diff --git a/agent/rpcclient/health/health.go b/agent/rpcclient/health/health.go index 004101144..828e28494 100644 --- a/agent/rpcclient/health/health.go +++ b/agent/rpcclient/health/health.go @@ -133,15 +133,16 @@ func (r serviceRequest) Type() string { return "agent.rpcclient.health.serviceRequest" } -func (r serviceRequest) NewMaterializer() (*submatview.Materializer, error) { +func (r serviceRequest) NewMaterializer() (submatview.Materializer, error) { view, err := newHealthView(r.ServiceSpecificRequest) if err != nil { return nil, err } - return submatview.NewMaterializer(submatview.Deps{ + deps := submatview.Deps{ View: view, - Client: pbsubscribe.NewStateChangeSubscriptionClient(r.deps.Conn), Logger: r.deps.Logger, Request: newMaterializerRequest(r.ServiceSpecificRequest), - }), nil + } + + return submatview.NewRPCMaterializer(pbsubscribe.NewStateChangeSubscriptionClient(r.deps.Conn), deps), nil } diff --git a/agent/rpcclient/health/view_test.go 
b/agent/rpcclient/health/view_test.go index c2a7ea79b..137a9986a 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -537,17 +537,17 @@ type serviceRequestStub struct { streamClient submatview.StreamClient } -func (r serviceRequestStub) NewMaterializer() (*submatview.Materializer, error) { +func (r serviceRequestStub) NewMaterializer() (submatview.Materializer, error) { view, err := newHealthView(r.ServiceSpecificRequest) if err != nil { return nil, err } - return submatview.NewMaterializer(submatview.Deps{ + deps := submatview.Deps{ View: view, - Client: r.streamClient, Logger: hclog.New(nil), Request: newMaterializerRequest(r.ServiceSpecificRequest), - }), nil + } + return submatview.NewRPCMaterializer(r.streamClient, deps), nil } func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsubscribe.Event { diff --git a/agent/structs/config_entry_export_oss_test.go b/agent/structs/config_entry_export_oss_test.go new file mode 100644 index 000000000..4015f5d71 --- /dev/null +++ b/agent/structs/config_entry_export_oss_test.go @@ -0,0 +1,62 @@ +//go:build !consulent +// +build !consulent + +package structs + +import ( + "testing" +) + +func TestExportedServicesConfigEntry_OSS(t *testing.T) { + cases := map[string]configEntryTestcase{ + "normalize: noop in oss": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Consumers: []ServiceConsumer{ + { + PeerName: "bar", + }, + }, + }, + }, + }, + expected: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Namespace: "", + Consumers: []ServiceConsumer{ + { + PeerName: "bar", + }, + }, + }, + }, + }, + }, + "validate: empty name": { + entry: &ExportedServicesConfigEntry{ + Name: "", + }, + validateErr: `exported-services Name must be "default"`, + }, + "validate: wildcard name": { + entry: &ExportedServicesConfigEntry{ + Name: WildcardSpecifier, + }, + validateErr: `exported-services Name must be "default"`, + }, + "validate: other name": { + entry: &ExportedServicesConfigEntry{ + Name: "foo", + }, + validateErr: `exported-services Name must be "default"`, + }, + } + + testConfigEntryNormalizeAndValidate(t, cases) +} diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index 8a184cc39..e7e33c54d 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -35,9 +35,14 @@ type ExportedService struct { } // ServiceConsumer represents a downstream consumer of the service to be exported. +// At most one of Partition or PeerName must be specified. type ServiceConsumer struct { // Partition is the admin partition to export the service to. + // Deprecated: PeerName should be used for both remote peers and local partitions. Partition string + + // PeerName is the name of the peer to export the service to. 
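+	// Validate (below) rejects consumers that set both PeerName and Partition,
+	// and neither field may be the wildcard specifier "*".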
+ PeerName string } func (e *ExportedServicesConfigEntry) ToMap() map[string]map[string][]string { @@ -99,37 +104,40 @@ func (e *ExportedServicesConfigEntry) Normalize() error { e.EnterpriseMeta.Normalize() for i := range e.Services { - e.Services[i].Namespace = acl.NamespaceOrDefault(e.Services[i].Namespace) + e.Services[i].Namespace = acl.NormalizeNamespace(e.Services[i].Namespace) } return nil } func (e *ExportedServicesConfigEntry) Validate() error { - if e.Name == "" { - return fmt.Errorf("Name is required") - } - if e.Name == WildcardSpecifier { - return fmt.Errorf("exported-services Name must be the name of a partition, and not a wildcard") - } - - if err := requireEnterprise(e.GetKind()); err != nil { + if err := validateExportedServicesName(e.Name); err != nil { return err } + if err := validateConfigEntryMeta(e.Meta); err != nil { return err } - for _, svc := range e.Services { + for i, svc := range e.Services { if svc.Name == "" { - return fmt.Errorf("service name cannot be empty") + return fmt.Errorf("Services[%d]: service name cannot be empty", i) + } + if svc.Namespace == WildcardSpecifier && svc.Name != WildcardSpecifier { + return fmt.Errorf("Services[%d]: service name must be wildcard if namespace is wildcard", i) } if len(svc.Consumers) == 0 { - return fmt.Errorf("service %q must have at least one consumer", svc.Name) + return fmt.Errorf("Services[%d]: must have at least one consumer", i) } - for _, consumer := range svc.Consumers { + for j, consumer := range svc.Consumers { + if consumer.PeerName != "" && consumer.Partition != "" { + return fmt.Errorf("Services[%d].Consumers[%d]: must define at most one of PeerName or Partition", i, j) + } if consumer.Partition == WildcardSpecifier { - return fmt.Errorf("exporting to all partitions (wildcard) is not yet supported") + return fmt.Errorf("Services[%d].Consumers[%d]: exporting to all partitions (wildcard) is not supported", i, j) + } + if consumer.PeerName == WildcardSpecifier { + return fmt.Errorf("Services[%d].Consumers[%d]: exporting to all peers (wildcard) is not supported", i, j) } } } diff --git a/agent/structs/config_entry_exports_test.go b/agent/structs/config_entry_exports_test.go new file mode 100644 index 000000000..db0aadb91 --- /dev/null +++ b/agent/structs/config_entry_exports_test.go @@ -0,0 +1,94 @@ +package structs + +import ( + "testing" +) + +func TestExportedServicesConfigEntry(t *testing.T) { + cases := map[string]configEntryTestcase{ + "validate: empty service name": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "", + }, + }, + }, + validateErr: `service name cannot be empty`, + }, + "validate: empty consumer list": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + }, + }, + }, + validateErr: `must have at least one consumer`, + }, + "validate: no wildcard in consumer partition": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "api", + Consumers: []ServiceConsumer{ + { + Partition: "foo", + }, + }, + }, + { + Name: "web", + Consumers: []ServiceConsumer{ + { + Partition: "*", + }, + }, + }, + }, + }, + validateErr: `Services[1].Consumers[0]: exporting to all partitions (wildcard) is not supported`, + }, + "validate: no wildcard in consumer peername": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Consumers: []ServiceConsumer{ + { + PeerName: "foo", + }, + { + PeerName: 
"*", + }, + }, + }, + }, + }, + validateErr: `Services[0].Consumers[1]: exporting to all peers (wildcard) is not supported`, + }, + "validate: cannot specify consumer with partition and peername": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Consumers: []ServiceConsumer{ + { + Partition: "foo", + PeerName: "bar", + }, + }, + }, + }, + }, + validateErr: `Services[0].Consumers[0]: must define at most one of PeerName or Partition`, + }, + } + + testConfigEntryNormalizeAndValidate(t, cases) +} diff --git a/agent/structs/config_entry_oss.go b/agent/structs/config_entry_oss.go index 2cd1db7ac..4bd3a93fc 100644 --- a/agent/structs/config_entry_oss.go +++ b/agent/structs/config_entry_oss.go @@ -38,6 +38,9 @@ func validateInnerEnterpriseMeta(_, _ *acl.EnterpriseMeta) error { return nil } -func requireEnterprise(kind string) error { - return fmt.Errorf("Config entry kind %q requires Consul Enterprise", kind) +func validateExportedServicesName(name string) error { + if name != "default" { + return fmt.Errorf(`exported-services Name must be "default"`) + } + return nil } diff --git a/agent/structs/peering.go b/agent/structs/peering.go new file mode 100644 index 000000000..16235fd86 --- /dev/null +++ b/agent/structs/peering.go @@ -0,0 +1,9 @@ +package structs + +// PeeringToken identifies a peer in order for a connection to be established. +type PeeringToken struct { + CA []string + ServerAddresses []string + ServerName string + PeerID string +} diff --git a/agent/structs/prepared_query.go b/agent/structs/prepared_query.go index b6028cead..440053f0b 100644 --- a/agent/structs/prepared_query.go +++ b/agent/structs/prepared_query.go @@ -79,6 +79,10 @@ type ServiceQuery struct { // should be directly next to their services so this isn't an issue. Connect bool + // If not empty, PeerName represents the peer that the service + // was imported from. + PeerName string + // EnterpriseMeta is the embedded enterprise metadata acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 7a06aeeda..88e5a9b21 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -14,11 +14,10 @@ import ( "strings" "time" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/duration" "github.com/golang/protobuf/ptypes/timestamp" - - "github.com/golang/protobuf/proto" - ptypes "github.com/golang/protobuf/ptypes" "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" @@ -78,6 +77,29 @@ const ( ServiceVirtualIPRequestType = 32 FreeVirtualIPRequestType = 33 KindServiceNamesType = 34 + PeeringWriteType = 35 + PeeringDeleteType = 36 + PeeringTerminateByIDType = 37 + PeeringTrustBundleWriteType = 38 + PeeringTrustBundleDeleteType = 39 +) + +const ( + // LocalPeerKeyword is a reserved keyword used for indexing in the state store for objects in the local peer. + LocalPeerKeyword = "internal" + + // DefaultPeerKeyword is the PeerName to use to refer to the local + // cluster's own data, rather than replicated peered data. + // + // This may internally be converted into LocalPeerKeyword, but external + // uses should not use that symbol directly in most cases. + DefaultPeerKeyword = "" + + // TODOPeerKeyword is the peer keyword to use if you aren't sure if the + // usage SHOULD be peering-aware yet. 
+ // + // TODO(peering): remove this in the future + TODOPeerKeyword = "" ) // if a new request type is added above it must be @@ -121,6 +143,10 @@ var requestTypeStrings = map[MessageType]string{ ServiceVirtualIPRequestType: "ServiceVirtualIP", FreeVirtualIPRequestType: "FreeVirtualIP", KindServiceNamesType: "KindServiceName", + PeeringWriteType: "Peering", + PeeringDeleteType: "PeeringDelete", + PeeringTrustBundleWriteType: "PeeringTrustBundle", + PeeringTrustBundleDeleteType: "PeeringTrustBundleDelete", } const ( @@ -440,6 +466,8 @@ type RegisterRequest struct { // node portion of this update will not apply. SkipNodeUpdate bool + PeerName string + // EnterpriseMeta is the embedded enterprise metadata acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` @@ -470,6 +498,7 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool { if r.ID != node.ID || !strings.EqualFold(r.Node, node.Node) || r.PartitionOrDefault() != node.PartitionOrDefault() || + r.PeerName != node.PeerName || r.Address != node.Address || r.Datacenter != node.Datacenter || !reflect.DeepEqual(r.TaggedAddresses, node.TaggedAddresses) || @@ -490,6 +519,7 @@ type DeregisterRequest struct { Node string ServiceID string CheckID types.CheckID + PeerName string acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` WriteRequest } @@ -553,6 +583,7 @@ type DCSpecificRequest struct { Datacenter string NodeMetaFilters map[string]string Source QuerySource + PeerName string acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -565,6 +596,7 @@ func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo { info := cache.RequestInfo{ Token: r.Token, Datacenter: r.Datacenter, + PeerName: r.PeerName, MinIndex: r.MinQueryIndex, Timeout: r.MaxQueryTime, MaxAge: r.MaxAge, @@ -599,6 +631,7 @@ type ServiceDumpRequest struct { UseServiceKind bool Source QuerySource acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + PeerName string QueryOptions } @@ -610,6 +643,7 @@ func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo { info := cache.RequestInfo{ Token: r.Token, Datacenter: r.Datacenter, + PeerName: r.PeerName, MinIndex: r.MinQueryIndex, Timeout: r.MaxQueryTime, MaxAge: r.MaxAge, @@ -646,7 +680,11 @@ func (r *ServiceDumpRequest) CacheMinIndex() uint64 { // ServiceSpecificRequest is used to query about a specific service type ServiceSpecificRequest struct { - Datacenter string + Datacenter string + + // The name of the peer that the requested service was imported from. 
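+	// An empty value refers to the cluster's own local services; see
+	// DefaultPeerKeyword above.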
+ PeerName string + NodeMetaFilters map[string]string ServiceName string ServiceKind ServiceKind @@ -707,6 +745,7 @@ func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo { r.Connect, r.Filter, r.EnterpriseMeta, + r.PeerName, r.Ingress, r.ServiceKind, }, nil) @@ -728,6 +767,7 @@ func (r *ServiceSpecificRequest) CacheMinIndex() uint64 { type NodeSpecificRequest struct { Datacenter string Node string + PeerName string acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -761,13 +801,14 @@ func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo { return info } -// ChecksInStateRequest is used to query for nodes in a state +// ChecksInStateRequest is used to query for checks in a state type ChecksInStateRequest struct { Datacenter string NodeMetaFilters map[string]string State string Source QuerySource + PeerName string acl.EnterpriseMeta `mapstructure:",squash"` QueryOptions } @@ -783,12 +824,17 @@ type Node struct { Address string Datacenter string Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` TaggedAddresses map[string]string Meta map[string]string RaftIndex `bexpr:"-"` } +func (n *Node) PeerOrEmpty() string { + return n.PeerName +} + func (n *Node) GetEnterpriseMeta() *acl.EnterpriseMeta { return NodeEnterpriseMetaInPartition(n.Partition) } @@ -814,6 +860,7 @@ func (n *Node) IsSame(other *Node) bool { return n.ID == other.ID && strings.EqualFold(n.Node, other.Node) && n.PartitionOrDefault() == other.PartitionOrDefault() && + strings.EqualFold(n.PeerName, other.PeerName) && n.Address == other.Address && n.Datacenter == other.Datacenter && reflect.DeepEqual(n.TaggedAddresses, other.TaggedAddresses) && @@ -932,11 +979,18 @@ type ServiceNode struct { ServiceProxy ConnectProxyConfig ServiceConnect ServiceConnect + // If not empty, PeerName represents the peer that this ServiceNode was imported from. + PeerName string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` RaftIndex `bexpr:"-"` } +func (s *ServiceNode) PeerOrEmpty() string { + return s.PeerName +} + // PartialClone() returns a clone of the given service node, minus the node- // related fields that get filled in later, Address and TaggedAddresses. func (s *ServiceNode) PartialClone() *ServiceNode { @@ -978,6 +1032,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode { ModifyIndex: s.ModifyIndex, }, EnterpriseMeta: s.EnterpriseMeta, + PeerName: s.PeerName, } } @@ -997,6 +1052,7 @@ func (s *ServiceNode) ToNodeService() *NodeService { EnableTagOverride: s.ServiceEnableTagOverride, Proxy: s.ServiceProxy, Connect: s.ServiceConnect, + PeerName: s.PeerName, EnterpriseMeta: s.EnterpriseMeta, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, @@ -1142,6 +1198,9 @@ type NodeService struct { acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + // If not empty, PeerName represents the peer that the NodeService was imported from. 
+ PeerName string + RaftIndex `bexpr:"-"` } @@ -1426,6 +1485,7 @@ func (s *NodeService) IsSame(other *NodeService) bool { s.Kind != other.Kind || !reflect.DeepEqual(s.Proxy, other.Proxy) || s.Connect != other.Connect || + s.PeerName != other.PeerName || !s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1497,6 +1557,7 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode { ServiceProxy: s.Proxy, ServiceConnect: s.Connect, EnterpriseMeta: s.EnterpriseMeta, + PeerName: s.PeerName, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, @@ -1538,6 +1599,10 @@ type HealthCheck struct { // HTTP or GRPC health check of the service. ExposedPort int + // PeerName is the name of the peer the check was imported from. + // It is empty if the check was registered locally. + PeerName string `json:",omitempty"` + Definition HealthCheckDefinition `bexpr:"-"` acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` @@ -1545,6 +1610,10 @@ type HealthCheck struct { RaftIndex `bexpr:"-"` } +func (hc *HealthCheck) PeerOrEmpty() string { + return hc.PeerName +} + func (hc *HealthCheck) NodeIdentity() Identity { return Identity{ ID: hc.Node, @@ -1702,6 +1771,7 @@ func (c *HealthCheck) IsSame(other *HealthCheck) bool { c.ServiceName != other.ServiceName || !reflect.DeepEqual(c.ServiceTags, other.ServiceTags) || !reflect.DeepEqual(c.Definition, other.Definition) || + c.PeerName != other.PeerName || !c.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1876,6 +1946,7 @@ type NodeInfo struct { ID types.NodeID Node string Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` Address string TaggedAddresses map[string]string Meta map[string]string diff --git a/agent/structs/structs_filtering_test.go b/agent/structs/structs_filtering_test.go index b094cf5bd..93d51c5b1 100644 --- a/agent/structs/structs_filtering_test.go +++ b/agent/structs/structs_filtering_test.go @@ -53,7 +53,7 @@ func TestPointerStructure(t *testing.T) { require.Equal(t, "1.1.1.1", val) } -/////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // // NOTE: The tests within this file are designed to validate that the fields // that will be available for filtering for various data types in the @@ -61,7 +61,7 @@ func TestPointerStructure(t *testing.T) { // to update this file to get the tests passing again then you definitely // should update the documentation as well. 
// -/////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// type fieldConfigTest struct { dataType interface{} @@ -309,6 +309,11 @@ var expectedFieldConfigNode bexpr.FieldConfigurations = bexpr.FieldConfiguration CoerceFn: bexpr.CoerceString, SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, "Address": &bexpr.FieldConfiguration{ StructFieldName: "Address", CoerceFn: bexpr.CoerceString, @@ -408,6 +413,11 @@ var expectedFieldConfigNodeService bexpr.FieldConfigurations = bexpr.FieldConfig StructFieldName: "ServiceConnect", SubFields: expectedFieldConfigServiceConnect, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, } var expectedFieldConfigServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{ @@ -507,6 +517,11 @@ var expectedFieldConfigServiceNode bexpr.FieldConfigurations = bexpr.FieldConfig StructFieldName: "ServiceConnect", SubFields: expectedFieldConfigServiceConnect, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, } var expectedFieldConfigHealthCheck bexpr.FieldConfigurations = bexpr.FieldConfigurations{ @@ -578,6 +593,11 @@ var expectedFieldConfigHealthCheck bexpr.FieldConfigurations = bexpr.FieldConfig SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual}, StructFieldName: "ExposedPort", }, + "PeerName": &bexpr.FieldConfiguration{ + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + StructFieldName: "PeerName", + }, } var expectedFieldConfigCheckServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{ @@ -612,6 +632,11 @@ var expectedFieldConfigNodeInfo bexpr.FieldConfigurations = bexpr.FieldConfigura CoerceFn: bexpr.CoerceString, SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, "Address": &bexpr.FieldConfiguration{ StructFieldName: "Address", CoerceFn: bexpr.CoerceString, diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 57711184b..cee4cff2e 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -1908,6 +1908,8 @@ func TestServiceDumpRequest_CacheInfoKey(t *testing.T) { var cacheInfoIgnoredFields = map[string]bool{ // Datacenter is part of the 
cache key added by the cache itself. "Datacenter": true, + // PeerName is part of the cache key added by the cache itself. + "PeerName": true, // QuerySource is always the same for every request from a single agent, so it // is excluded from the key. "Source": true, diff --git a/agent/submatview/local_materializer.go b/agent/submatview/local_materializer.go new file mode 100644 index 000000000..b65c2f074 --- /dev/null +++ b/agent/submatview/local_materializer.go @@ -0,0 +1,109 @@ +package submatview + +import ( + "context" + "errors" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/lib/retry" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// LocalMaterializer is a materializer for a stream of events +// and manages the local subscription to the event publisher +// until the cache result is discarded when its TTL expires. +type LocalMaterializer struct { + deps Deps + backend LocalBackend + retryWaiter *retry.Waiter + handler eventHandler + + mat *materializer +} + +var _ Materializer = (*LocalMaterializer)(nil) + +type LocalBackend interface { + Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) +} + +func NewLocalMaterializer(backend LocalBackend, deps Deps) *LocalMaterializer { + m := LocalMaterializer{ + backend: backend, + deps: deps, + mat: newMaterializer(deps.Logger, deps.View, deps.Waiter), + } + return &m +} + +// Query implements Materializer +func (m *LocalMaterializer) Query(ctx context.Context, minIndex uint64) (Result, error) { + return m.mat.query(ctx, minIndex) +} + +// Run receives events from a local subscription backend and sends them to the View. +// It runs until ctx is cancelled, so it is expected to be run in a goroutine. +// Mirrors implementation of RPCMaterializer. +// +// Run implements Materializer +func (m *LocalMaterializer) Run(ctx context.Context) { + for { + req := m.deps.Request(m.mat.currentIndex()) + err := m.subscribeOnce(ctx, req) + if ctx.Err() != nil { + return + } + m.mat.handleError(req, err) + + if err := m.mat.retryWaiter.Wait(ctx); err != nil { + return + } + } +} + +// subscribeOnce opens a new subscription to a local backend and runs +// for its lifetime or until the view is closed. 
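+// It surfaces stream.ErrSubForceClosed when the server force-closes the
+// subscription, and Run resubscribes after the retry waiter fires.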
+func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	m.handler = initialHandler(req.Index)
+
+	entMeta := acl.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace)
+	sub, err := m.backend.Subscribe(state.PBToStreamSubscribeRequest(req, entMeta))
+	if err != nil {
+		return err
+	}
+	defer sub.Unsubscribe()
+
+	for {
+		event, err := sub.Next(ctx)
+		switch {
+		case errors.Is(err, stream.ErrSubForceClosed):
+			m.deps.Logger.Trace("subscription reset by server")
+			return err
+
+		case err != nil:
+			return err
+		}
+
+		e := event.Payload.ToSubscriptionEvent(event.Index)
+		m.handler, err = m.handler(m, e)
+		if err != nil {
+			m.mat.reset()
+			return err
+		}
+	}
+}
+
+// updateView implements viewState
+func (m *LocalMaterializer) updateView(events []*pbsubscribe.Event, index uint64) error {
+	return m.mat.updateView(events, index)
+}
+
+// reset implements viewState
+func (m *LocalMaterializer) reset() {
+	m.mat.reset()
+}
diff --git a/agent/submatview/materializer.go b/agent/submatview/materializer.go
index 3b870d9e1..aa312d060 100644
--- a/agent/submatview/materializer.go
+++ b/agent/submatview/materializer.go
@@ -6,9 +6,6 @@ import (
 	"time"
 
 	"github.com/hashicorp/go-hclog"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 
 	"github.com/hashicorp/consul/lib/retry"
 	"github.com/hashicorp/consul/proto/pbsubscribe"
@@ -38,16 +35,27 @@ type View interface {
 	Reset()
 }
 
-// Materializer consumes the event stream, handling any framing events, and
-// sends the events to View as they are received.
-//
-// Materializer is used as the cache.Result.State for a streaming
-// cache type and manages the actual streaming RPC call to the servers behind
-// the scenes until the cache result is discarded when TTL expires.
-type Materializer struct {
-	deps        Deps
+// Result returned from the View.
+type Result struct {
+	Index uint64
+	Value interface{}
+	// Cached is true if the requested value was already available locally. If
+	// the value is false, it indicates that Query had to wait for an update.
+	Cached bool
+}
+
+type Deps struct {
+	View    View
+	Logger  hclog.Logger
+	Waiter  *retry.Waiter
+	Request func(index uint64) *pbsubscribe.SubscribeRequest
+}
+
+// materializer consumes the event stream, handling any framing events, and
+// allows for querying the materialized view.
+type materializer struct {
 	retryWaiter *retry.Waiter
-	handler     eventHandler
+	logger      hclog.Logger
 
 	// lock protects the mutable state - all fields below it must only be accessed
 	// while holding lock.
@@ -58,175 +66,22 @@ type Materializer struct {
 	err      error
 }
 
-type Deps struct {
-	View    View
-	Client  StreamClient
-	Logger  hclog.Logger
-	Waiter  *retry.Waiter
-	Request func(index uint64) *pbsubscribe.SubscribeRequest
-}
-
-// StreamClient provides a subscription to state change events.
-type StreamClient interface {
-	Subscribe(ctx context.Context, in *pbsubscribe.SubscribeRequest, opts ...grpc.CallOption) (pbsubscribe.StateChangeSubscription_SubscribeClient, error)
-}
-
-// NewMaterializer returns a new Materializer. Run must be called to start it.
-func NewMaterializer(deps Deps) *Materializer { - v := &Materializer{ - deps: deps, - view: deps.View, - retryWaiter: deps.Waiter, +func newMaterializer(logger hclog.Logger, view View, waiter *retry.Waiter) *materializer { + m := materializer{ + view: view, + retryWaiter: waiter, + logger: logger, updateCh: make(chan struct{}), } - if v.retryWaiter == nil { - v.retryWaiter = &retry.Waiter{ - MinFailures: 1, - // Start backing off with small increments (200-400ms) which will double - // each attempt. (200-400, 400-800, 800-1600, 1600-3200, 3200-6000, 6000 - // after that). (retry.Wait applies Max limit after jitter right now). - Factor: 200 * time.Millisecond, - MinWait: 0, - MaxWait: 60 * time.Second, - Jitter: retry.NewJitter(100), - } + if m.retryWaiter == nil { + m.retryWaiter = defaultWaiter() } - return v + return &m } -// Run receives events from the StreamClient and sends them to the View. It runs -// until ctx is cancelled, so it is expected to be run in a goroutine. -func (m *Materializer) Run(ctx context.Context) { - for { - req := m.deps.Request(m.index) - err := m.runSubscription(ctx, req) - if ctx.Err() != nil { - return - } - - failures := m.retryWaiter.Failures() - if isNonTemporaryOrConsecutiveFailure(err, failures) { - m.lock.Lock() - m.notifyUpdateLocked(err) - m.lock.Unlock() - } - - m.deps.Logger.Error("subscribe call failed", - "err", err, - "topic", req.Topic, - "key", req.Key, - "failure_count", failures+1) - - if err := m.retryWaiter.Wait(ctx); err != nil { - return - } - } -} - -// isNonTemporaryOrConsecutiveFailure returns true if the error is not a -// temporary error or if failures > 0. -func isNonTemporaryOrConsecutiveFailure(err error, failures int) bool { - // temporary is an interface used by net and other std lib packages to - // show error types represent temporary/recoverable errors. - temp, ok := err.(interface { - Temporary() bool - }) - return !ok || !temp.Temporary() || failures > 0 -} - -// runSubscription opens a new subscribe streaming call to the servers and runs -// for it's lifetime or until the view is closed. -func (m *Materializer) runSubscription(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - m.handler = initialHandler(req.Index) - - s, err := m.deps.Client.Subscribe(ctx, req) - if err != nil { - return err - } - - for { - event, err := s.Recv() - switch { - case isGrpcStatus(err, codes.Aborted): - m.reset() - return resetErr("stream reset requested") - case err != nil: - return err - } - - m.handler, err = m.handler(m, event) - if err != nil { - m.reset() - return err - } - } -} - -func isGrpcStatus(err error, code codes.Code) bool { - s, ok := status.FromError(err) - return ok && s.Code() == code -} - -// resetErr represents a server request to reset the subscription, it's typed so -// we can mark it as temporary and so attempt to retry first time without -// notifying clients. -type resetErr string - -// Temporary Implements the internal Temporary interface -func (e resetErr) Temporary() bool { - return true -} - -// Error implements error -func (e resetErr) Error() string { - return string(e) -} - -// reset clears the state ready to start a new stream from scratch. 
-func (m *Materializer) reset() { - m.lock.Lock() - defer m.lock.Unlock() - - m.view.Reset() - m.index = 0 -} - -func (m *Materializer) updateView(events []*pbsubscribe.Event, index uint64) error { - m.lock.Lock() - defer m.lock.Unlock() - - if err := m.view.Update(events); err != nil { - return err - } - m.index = index - m.notifyUpdateLocked(nil) - m.retryWaiter.Reset() - return nil -} - -// notifyUpdateLocked closes the current update channel and recreates a new -// one. It must be called while holding the s.lock lock. -func (m *Materializer) notifyUpdateLocked(err error) { - m.err = err - close(m.updateCh) - m.updateCh = make(chan struct{}) -} - -// Result returned from the View. -type Result struct { - Index uint64 - Value interface{} - // Cached is true if the requested value was already available locally. If - // the value is false, it indicates that getFromView had to wait for an update, - Cached bool -} - -// getFromView blocks until the index of the View is greater than opts.MinIndex, -//or the context is cancelled. -func (m *Materializer) getFromView(ctx context.Context, minIndex uint64) (Result, error) { +// query blocks until the index of the View is greater than minIndex, +// or the context is cancelled. +func (m *materializer) query(ctx context.Context, minIndex uint64) (Result, error) { m.lock.Lock() result := Result{ @@ -278,3 +133,85 @@ func (m *Materializer) getFromView(ctx context.Context, minIndex uint64) (Result } } } + +func (m *materializer) currentIndex() uint64 { + var resp uint64 + + m.lock.Lock() + resp = m.index + m.lock.Unlock() + + return resp +} + +// notifyUpdateLocked closes the current update channel and creates a new +// one. It must be called while holding the m.lock lock. +func (m *materializer) notifyUpdateLocked(err error) { + m.err = err + close(m.updateCh) + m.updateCh = make(chan struct{}) +} + +// reset clears the state ready to start a new stream from scratch. +func (m *materializer) reset() { + m.lock.Lock() + defer m.lock.Unlock() + + m.view.Reset() + m.index = 0 +} + +// updateView updates the view from a sequence of events and stores +// the corresponding Raft index. +func (m *materializer) updateView(events []*pbsubscribe.Event, index uint64) error { + m.lock.Lock() + defer m.lock.Unlock() + + if err := m.view.Update(events); err != nil { + return err + } + + m.index = index + m.notifyUpdateLocked(nil) + m.retryWaiter.Reset() + return nil +} + +func (m *materializer) handleError(req *pbsubscribe.SubscribeRequest, err error) { + failures := m.retryWaiter.Failures() + if isNonTemporaryOrConsecutiveFailure(err, failures) { + m.lock.Lock() + m.notifyUpdateLocked(err) + m.lock.Unlock() + } + + m.logger.Error("subscribe call failed", + "err", err, + "topic", req.Topic, + "key", req.Key, + "failure_count", failures+1) +} + +// isNonTemporaryOrConsecutiveFailure returns true if the error is not a +// temporary error or if failures > 0. +func isNonTemporaryOrConsecutiveFailure(err error, failures int) bool { + // temporary is an interface used by net and other std lib packages to + // show error types represent temporary/recoverable errors. + temp, ok := err.(interface { + Temporary() bool + }) + return !ok || !temp.Temporary() || failures > 0 +} + +func defaultWaiter() *retry.Waiter { + return &retry.Waiter{ + MinFailures: 1, + // Start backing off with small increments (200-400ms) which will double + // each attempt. (200-400, 400-800, 800-1600, 1600-3200, 3200-6000, 6000 + // after that).
(retry.Wait applies Max limit after jitter right now). + Factor: 200 * time.Millisecond, + MinWait: 0, + MaxWait: 60 * time.Second, + Jitter: retry.NewJitter(100), + } +} diff --git a/agent/submatview/rpc_materializer.go b/agent/submatview/rpc_materializer.go new file mode 100644 index 000000000..3b379d4e8 --- /dev/null +++ b/agent/submatview/rpc_materializer.go @@ -0,0 +1,125 @@ +package submatview + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// RPCMaterializer is a materializer for a streaming cache type +// and manages the actual streaming RPC call to the servers behind +// the scenes until the cache result is discarded when its TTL expires. +type RPCMaterializer struct { + deps Deps + client StreamClient + handler eventHandler + + mat *materializer +} + +var _ Materializer = (*RPCMaterializer)(nil) + +// StreamClient provides a subscription to state change events. +type StreamClient interface { + Subscribe(ctx context.Context, in *pbsubscribe.SubscribeRequest, opts ...grpc.CallOption) (pbsubscribe.StateChangeSubscription_SubscribeClient, error) +} + +// NewRPCMaterializer returns a new RPCMaterializer. Run must be called to start it. +func NewRPCMaterializer(client StreamClient, deps Deps) *RPCMaterializer { + m := RPCMaterializer{ + deps: deps, + client: client, + mat: newMaterializer(deps.Logger, deps.View, deps.Waiter), + } + return &m +} + +// Query implements Materializer +func (m *RPCMaterializer) Query(ctx context.Context, minIndex uint64) (Result, error) { + return m.mat.query(ctx, minIndex) +} + +// Run receives events from the StreamClient and sends them to the View. It runs +// until ctx is cancelled, so it is expected to be run in a goroutine. +// It mirrors the implementation of LocalMaterializer. +// +// Run implements Materializer +func (m *RPCMaterializer) Run(ctx context.Context) { + for { + req := m.deps.Request(m.mat.currentIndex()) + err := m.subscribeOnce(ctx, req) + if ctx.Err() != nil { + return + } + m.mat.handleError(req, err) + + if err := m.mat.retryWaiter.Wait(ctx); err != nil { + return + } + } +} + +// subscribeOnce opens a new subscribe streaming call to the servers and runs +// for its lifetime or until the view is closed. +func (m *RPCMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + m.handler = initialHandler(req.Index) + + s, err := m.client.Subscribe(ctx, req) + if err != nil { + return err + } + + for { + event, err := s.Recv() + switch { + case isGrpcStatus(err, codes.Aborted): + m.mat.reset() + return resetErr("stream reset requested") + case err != nil: + return err + } + + m.handler, err = m.handler(m, event) + if err != nil { + m.mat.reset() + return err + } + } +} + +func isGrpcStatus(err error, code codes.Code) bool { + s, ok := status.FromError(err) + return ok && s.Code() == code +} + +// resetErr represents a server request to reset the subscription. It is typed +// so we can mark it as temporary and retry the first time without +// notifying clients.
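Run above is the standard resubscribe-with-backoff shape: run one subscription to completion, report the failure, then wait before dialing again. A minimal standalone sketch of that shape, assuming only the retry.Waiter behavior visible in this patch (Wait blocks with jittered exponential backoff and returns an error only when ctx is done); doWork is a hypothetical stand-in for subscribeOnce:

package sketch

import (
	"context"

	"github.com/hashicorp/consul/lib/retry"
)

// retryLoop runs one unit of work to completion, then backs off before
// trying again, until ctx is cancelled.
func retryLoop(ctx context.Context, w *retry.Waiter, doWork func(context.Context) error) {
	for {
		err := doWork(ctx)
		if ctx.Err() != nil {
			return // shutting down, not a failure
		}
		_ = err // the real loop logs err and notifies blocked callers here

		// Wait sleeps for the next backoff interval; it returns an error
		// only when ctx is done.
		if err := w.Wait(ctx); err != nil {
			return
		}
	}
}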
+type resetErr string + +// Temporary Implements the internal Temporary interface +func (e resetErr) Temporary() bool { + return true +} + +// Error implements error +func (e resetErr) Error() string { + return string(e) +} + +// updateView implements viewState +func (m *RPCMaterializer) updateView(events []*pbsubscribe.Event, index uint64) error { + return m.mat.updateView(events, index) +} + +// reset implements viewState +func (m *RPCMaterializer) reset() { + m.mat.reset() +} diff --git a/agent/submatview/store.go b/agent/submatview/store.go index 07363f740..0b2734793 100644 --- a/agent/submatview/store.go +++ b/agent/submatview/store.go @@ -34,8 +34,14 @@ type Store struct { idleTTL time.Duration } +// A Materializer maintains a materialized view of a subscription on an event stream. +type Materializer interface { + Run(ctx context.Context) + Query(ctx context.Context, minIndex uint64) (Result, error) +} + type entry struct { - materializer *Materializer + materializer Materializer expiry *ttlcache.Entry stop func() // requests is the count of active requests using this entry. This entry will @@ -100,7 +106,7 @@ type Request interface { // NewMaterializer will be called if there is no active materializer to fulfil // the request. It should return a Materializer appropriate for streaming // data to fulfil this request. - NewMaterializer() (*Materializer, error) + NewMaterializer() (Materializer, error) // Type should return a string which uniquely identifies this type of request. // The returned value is used as the prefix of the key used to index // entries in the Store. @@ -124,7 +130,7 @@ func (s *Store) Get(ctx context.Context, req Request) (Result, error) { defer cancel() } - result, err := materializer.getFromView(ctx, info.MinIndex) + result, err := materializer.Query(ctx, info.MinIndex) // context.DeadlineExceeded is translated to nil to match the timeout // behaviour of agent/cache.Cache.Get. if err == nil || errors.Is(err, context.DeadlineExceeded) { @@ -155,7 +161,7 @@ func (s *Store) Notify( index := info.MinIndex for { - result, err := materializer.getFromView(ctx, index) + result, err := materializer.Query(ctx, index) switch { case ctx.Err() != nil: return @@ -185,7 +191,7 @@ func (s *Store) Notify( // readEntry from the store, and increment the requests counter. releaseEntry // must be called when the request is finished to decrement the counter. 
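Store.Get and Store.Notify both park in Materializer.Query until the view's index passes MinIndex. The wakeup comes from the close-and-replace channel idiom in notifyUpdateLocked earlier in this patch: closing a channel releases every blocked waiter at once, and a fresh channel is installed for the next generation. A standalone sketch of the idiom (type and method names here are illustrative, not from the patch):

package sketch

import (
	"context"
	"sync"
)

// broadcaster isolates the close-and-replace channel idiom.
type broadcaster struct {
	mu sync.Mutex
	ch chan struct{}
}

func newBroadcaster() *broadcaster {
	return &broadcaster{ch: make(chan struct{})}
}

// wait blocks until the next notify call or until ctx is cancelled.
func (b *broadcaster) wait(ctx context.Context) error {
	b.mu.Lock()
	ch := b.ch // snapshot this generation's channel before unlocking
	b.mu.Unlock()

	select {
	case <-ch: // closed by notify
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// notify wakes all current waiters and resets for the next round.
func (b *broadcaster) notify() {
	b.mu.Lock()
	defer b.mu.Unlock()
	close(b.ch)
	b.ch = make(chan struct{})
}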
-func (s *Store) readEntry(req Request) (string, *Materializer, error) { +func (s *Store) readEntry(req Request) (string, Materializer, error) { info := req.CacheInfo() key := makeEntryKey(req.Type(), info) diff --git a/agent/submatview/store_integration_test.go b/agent/submatview/store_integration_test.go index e8247b818..72eed5a5f 100644 --- a/agent/submatview/store_integration_test.go +++ b/agent/submatview/store_integration_test.go @@ -253,7 +253,6 @@ func (e *eventProducer) Produce(ctx context.Context, pub *stream.EventPublisher) }, }, } - } e.nodesLock.Lock() diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index b177380d3..bdbc576c7 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -24,7 +24,7 @@ func TestStore_Get(t *testing.T) { store := NewStore(hclog.New(nil)) go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( @@ -199,14 +199,14 @@ type resultOrError struct { Err error } -type fakeRequest struct { +type fakeRPCRequest struct { index uint64 timeout time.Duration key string client *TestStreamingClient } -func (r *fakeRequest) CacheInfo() cache.RequestInfo { +func (r *fakeRPCRequest) CacheInfo() cache.RequestInfo { key := r.key if key == "" { key = "key" @@ -220,10 +220,9 @@ func (r *fakeRequest) CacheInfo() cache.RequestInfo { } } -func (r *fakeRequest) NewMaterializer() (*Materializer, error) { - return NewMaterializer(Deps{ +func (r *fakeRPCRequest) NewMaterializer() (Materializer, error) { + deps := Deps{ View: &fakeView{srvs: make(map[string]*pbservice.CheckServiceNode)}, - Client: r.client, Logger: hclog.New(nil), Request: func(index uint64) *pbsubscribe.SubscribeRequest { req := &pbsubscribe.SubscribeRequest{ @@ -236,10 +235,11 @@ func (r *fakeRequest) NewMaterializer() (*Materializer, error) { } return req }, - }), nil + } + return NewRPCMaterializer(r.client, deps), nil } -func (r *fakeRequest) Type() string { +func (r *fakeRPCRequest) Type() string { return fmt.Sprintf("%T", r) } @@ -291,7 +291,7 @@ func TestStore_Notify(t *testing.T) { store := NewStore(hclog.New(nil)) go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( @@ -360,7 +360,7 @@ func TestStore_Notify_ManyRequests(t *testing.T) { store := NewStore(hclog.New(nil)) go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) @@ -393,13 +393,13 @@ func TestStore_Notify_ManyRequests(t *testing.T) { assertRequestCount(r, store, req, 4) }) - var req2 *fakeRequest + var req2 *fakeRPCRequest runStep(t, "Get and Notify with a different key", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - req2 = &fakeRequest{client: req.client, key: "key2", index: 22} + req2 = &fakeRPCRequest{client: req.client, key: "key2", index: 22} require.NoError(t, store.Notify(ctx, req2, cID, ch1)) go func() { @@ -472,7 +472,7 @@ func TestStore_Run_ExpiresEntries(t *testing.T) { store.idleTTL = ttl go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) diff --git a/agent/testagent.go b/agent/testagent.go index 11ca9a518..4dbf859bc 100644 --- 
a/agent/testagent.go +++ b/agent/testagent.go @@ -87,8 +87,6 @@ type TestAgent struct { // NewTestAgent returns a started agent with the given configuration. It fails // the test if the Agent could not be started. -// The caller is responsible for calling Shutdown() to stop the agent and remove -// temporary directories. func NewTestAgent(t *testing.T, hcl string) *TestAgent { a := StartTestAgent(t, TestAgent{HCL: hcl}) t.Cleanup(func() { a.Shutdown() }) diff --git a/api/agent.go b/api/agent.go index 7bbe39ea7..f69c697c5 100644 --- a/api/agent.go +++ b/api/agent.go @@ -92,6 +92,7 @@ type AgentService struct { ContentHash string `json:",omitempty" bexpr:"-"` Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` + PeerName string `json:",omitempty"` // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. // For now though, ignoring it works well enough. diff --git a/api/agent_test.go b/api/agent_test.go index 6b5c97e76..0c1660b1e 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -784,7 +784,7 @@ func TestAPI_AgentService(t *testing.T) { ID: "foo", Service: "foo", Tags: []string{"bar", "baz"}, - ContentHash: "f72563cae6924fb5", + ContentHash: "3e352f348d44f7eb", Port: 8000, Weights: AgentWeights{ Passing: 1, diff --git a/api/config_entry_exports.go b/api/config_entry_exports.go index ae9cb2ff6..14a021f64 100644 --- a/api/config_entry_exports.go +++ b/api/config_entry_exports.go @@ -44,9 +44,14 @@ type ExportedService struct { } // ServiceConsumer represents a downstream consumer of the service to be exported. +// At most one of Partition or PeerName must be specified. type ServiceConsumer struct { // Partition is the admin partition to export the service to. + // Deprecated: PeerName should be used for both remote peers and local partitions. Partition string + + // PeerName is the name of the peer to export the service to. 
+ PeerName string +} func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } diff --git a/api/config_entry_exports_test.go b/api/config_entry_exports_test.go new file mode 100644 index 000000000..e1df48f75 --- /dev/null +++ b/api/config_entry_exports_test.go @@ -0,0 +1,102 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAPI_ConfigEntries_ExportedServices(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + entries := c.ConfigEntries() + + runStep(t, "set and get", func(t *testing.T) { + exports := &ExportedServicesConfigEntry{ + Name: PartitionDefaultName, + Partition: defaultPartition, + Meta: map[string]string{ + "gir": "zim", + }, + } + + _, wm, err := entries.Set(exports, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + entry, qm, err := entries.Get(ExportedServices, PartitionDefaultName, nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + result, ok := entry.(*ExportedServicesConfigEntry) + require.True(t, ok) + + // ignore indexes + result.CreateIndex = 0 + result.ModifyIndex = 0 + require.Equal(t, exports, result) + }) + + runStep(t, "update", func(t *testing.T) { + updated := &ExportedServicesConfigEntry{ + Name: PartitionDefaultName, + Services: []ExportedService{ + { + Name: "db", + Namespace: defaultNamespace, + Consumers: []ServiceConsumer{ + { + PeerName: "alpha", + }, + }, + }, + }, + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, + Partition: defaultPartition, + } + + _, wm, err := entries.Set(updated, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + entry, qm, err := entries.Get(ExportedServices, PartitionDefaultName, nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + result, ok := entry.(*ExportedServicesConfigEntry) + require.True(t, ok) + + // ignore indexes + result.CreateIndex = 0 + result.ModifyIndex = 0 + require.Equal(t, updated, result) + }) + + runStep(t, "list", func(t *testing.T) { + entries, qm, err := entries.List(ExportedServices, nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + require.Len(t, entries, 1) + }) + + runStep(t, "delete", func(t *testing.T) { + wm, err := entries.Delete(ExportedServices, PartitionDefaultName, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + // verify deletion + _, _, err = entries.Get(ExportedServices, PartitionDefaultName, nil) + require.Error(t, err) + }) +} diff --git a/go.mod b/go.mod index eb848e555..15e82d5fe 100644 --- a/go.mod +++ b/go.mod @@ -96,6 +96,7 @@ require ( google.golang.org/protobuf v1.25.0 gopkg.in/square/go-jose.v2 v2.5.1 gotest.tools/v3 v3.0.3 + inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 k8s.io/api v0.18.2 k8s.io/apimachinery v0.18.2 k8s.io/client-go v0.18.2 diff --git a/go.sum b/go.sum index bbb6a03a1..1e6194da7 100644 --- a/go.sum +++ b/go.sum @@ -160,6 +160,7 @@ github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvyukov/go-fuzz
v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0 h1:ZoRgc53qJCfSLimXqJDrmBhnt5GChDsExMCK7t48o0Y= github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -634,6 +635,10 @@ go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 h1:Tx9kY6yUkLge/pFG7IEMwDZy6CS2ajFc9TvQdPCW0uA= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -785,6 +790,7 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -853,8 +859,9 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -972,6 +979,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw= +inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8= k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= diff --git a/logging/names.go b/logging/names.go index 17db364af..015e4a0fc 100644 --- a/logging/names.go +++ b/logging/names.go @@ -50,6 +50,7 @@ const ( Sentinel string = "sentinel" Snapshot string = "snapshot" Partition string = "partition" + Peering string = "peering" TerminatingGateway string = "terminating_gateway" TLSUtil string = "tlsutil" Transaction string = "txn" diff --git a/proto/pbpeering/generate.go b/proto/pbpeering/generate.go new file mode 100644 index 000000000..1f060a6cf --- /dev/null +++ b/proto/pbpeering/generate.go @@ -0,0 +1,9 @@ +// TODO: files generated from this go:generate may fail the CI check because of relative source. +// Figure out a way to robustly use this file. +//go:generate protoc --gofast_out=. --gofast_opt=paths=source_relative --go-binary_out=. peering.proto +// requires: +// - protoc +// - github.com/gogo/protobuf/protoc-gen-gofast +// - github.com/hashicorp/protoc-gen-go-binary + +package pbpeering diff --git a/proto/pbpeering/peering.go b/proto/pbpeering/peering.go new file mode 100644 index 000000000..5a7cea91b --- /dev/null +++ b/proto/pbpeering/peering.go @@ -0,0 +1,202 @@ +package pbpeering + +import "time" + +// TODO(peering): These are byproducts of not embedding +// types in our protobuf definitions and are temporary; +// Hoping to replace them with 1 or 2 methods per request +// using https://github.com/hashicorp/consul/pull/12507 + +func (msg *PeeringReadRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringReadRequest) IsRead() bool { + return true +} + +func (msg *PeeringReadRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringReadRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringReadRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringReadRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +func (msg *PeeringListRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringListRequest) IsRead() bool { + return true +} + +func (msg *PeeringListRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringListRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringListRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringListRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + 
return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringListRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +func (msg *PeeringWriteRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringWriteRequest) IsRead() bool { + return false +} + +func (msg *PeeringWriteRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringWriteRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringWriteRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringWriteRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +func (msg *PeeringDeleteRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringDeleteRequest) IsRead() bool { + return false +} + +func (msg *PeeringDeleteRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringDeleteRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringDeleteRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringDeleteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringDeleteRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +// RequestDatacenter implements structs.RPCInfo +func (req *GenerateTokenRequest) RequestDatacenter() string { + return req.Datacenter +} + +// IsRead implements structs.RPCInfo +func (req *GenerateTokenRequest) IsRead() bool { + return false +} + +// AllowStaleRead implements structs.RPCInfo +func (req *GenerateTokenRequest) AllowStaleRead() bool { + return false +} + +// TokenSecret implements structs.RPCInfo +func (req *GenerateTokenRequest) TokenSecret() string { + return req.Token +} + +// SetTokenSecret implements structs.RPCInfo +func (req *GenerateTokenRequest) SetTokenSecret(token string) { + req.Token = token +} + +// HasTimedOut implements structs.RPCInfo +func (req *GenerateTokenRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *GenerateTokenRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +// RequestDatacenter implements structs.RPCInfo +func (req *InitiateRequest) RequestDatacenter() string { + return req.Datacenter +} + +// IsRead implements structs.RPCInfo +func (req *InitiateRequest) IsRead() bool { + return false +} + +// AllowStaleRead implements structs.RPCInfo +func (req *InitiateRequest) AllowStaleRead() bool { + return false +} + +// TokenSecret implements structs.RPCInfo +func (req *InitiateRequest) TokenSecret() string { + return req.Token +} + +// SetTokenSecret implements structs.RPCInfo +func (req *InitiateRequest) SetTokenSecret(token string) { + req.Token = token +} + +// HasTimedOut implements 
structs.RPCInfo +func (req *InitiateRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *InitiateRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +// ShouldDial returns true when the peering was stored via the peering initiation endpoint, +// AND the peering is not marked as terminated by our peer. +// If we generated a token for this peer we did not store our server addresses under PeerServerAddresses. +// These server addresses are for dialing, and only the peer initiating the peering will do the dialing. +func (p *Peering) ShouldDial() bool { + return len(p.PeerServerAddresses) > 0 && p.State != PeeringState_TERMINATED +} diff --git a/proto/pbpeering/peering.pb.binary.go b/proto/pbpeering/peering.pb.binary.go new file mode 100644 index 000000000..e7e755778 --- /dev/null +++ b/proto/pbpeering/peering.pb.binary.go @@ -0,0 +1,248 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: proto/pbpeering/peering.proto + +package pbpeering + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *Peering) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *Peering) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundle) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundle) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringReadRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringReadRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringReadResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringReadResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringListRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringListRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringListResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringListResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringWriteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringWriteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringWriteResponse) 
MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringWriteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringDeleteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringDeleteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringDeleteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringDeleteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTerminateByIDRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTerminateByIDRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTerminateByIDResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTerminateByIDResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleWriteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleWriteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleWriteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleWriteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleDeleteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleDeleteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleDeleteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleDeleteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *GenerateTokenRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *GenerateTokenRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *GenerateTokenResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *GenerateTokenResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// 
MarshalBinary implements encoding.BinaryMarshaler +func (msg *InitiateRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *InitiateRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *InitiateResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *InitiateResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Request) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Request) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Response) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Response) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Terminated) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Terminated) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbpeering/peering.pb.go b/proto/pbpeering/peering.pb.go new file mode 100644 index 000000000..902b8c28c --- /dev/null +++ b/proto/pbpeering/peering.pb.go @@ -0,0 +1,2569 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto/pbpeering/peering.proto + +package pbpeering + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + pbstatus "github.com/hashicorp/consul/proto/pbstatus" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// PeeringState enumerates all the states a peering can be in +type PeeringState int32 + +const ( + // Undefined represents an unset value for PeeringState during + // writes. + PeeringState_UNDEFINED PeeringState = 0 + // Initial means a Peering has been initialized and is awaiting + // acknowledgement from a remote peer. 
+ PeeringState_INITIAL PeeringState = 1 + // Active means that the peering connection is active and healthy. + PeeringState_ACTIVE PeeringState = 2 + // Failing means the peering connection has been interrupted but has not yet + // been terminated. + PeeringState_FAILING PeeringState = 3 + // Terminated means the peering relationship has been removed. + PeeringState_TERMINATED PeeringState = 4 +) + +// Enum value maps for PeeringState. +var ( + PeeringState_name = map[int32]string{ + 0: "UNDEFINED", + 1: "INITIAL", + 2: "ACTIVE", + 3: "FAILING", + 4: "TERMINATED", + } + PeeringState_value = map[string]int32{ + "UNDEFINED": 0, + "INITIAL": 1, + "ACTIVE": 2, + "FAILING": 3, + "TERMINATED": 4, + } +) + +func (x PeeringState) Enum() *PeeringState { + p := new(PeeringState) + *p = x + return p +} + +func (x PeeringState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PeeringState) Descriptor() protoreflect.EnumDescriptor { + return file_proto_pbpeering_peering_proto_enumTypes[0].Descriptor() +} + +func (PeeringState) Type() protoreflect.EnumType { + return &file_proto_pbpeering_peering_proto_enumTypes[0] +} + +func (x PeeringState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PeeringState.Descriptor instead. +func (PeeringState) EnumDescriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{0} +} + +// Operation enumerates supported operations for replicated resources. +type ReplicationMessage_Response_Operation int32 + +const ( + ReplicationMessage_Response_Unknown ReplicationMessage_Response_Operation = 0 + // UPSERT represents a create or update event. + ReplicationMessage_Response_UPSERT ReplicationMessage_Response_Operation = 1 + // DELETE indicates the resource should be deleted. + // In DELETE operations no Resource will be returned. + // Deletion by an importing peer must be done with the type URL and ID. + ReplicationMessage_Response_DELETE ReplicationMessage_Response_Operation = 2 +) + +// Enum value maps for ReplicationMessage_Response_Operation. +var ( + ReplicationMessage_Response_Operation_name = map[int32]string{ + 0: "Unknown", + 1: "UPSERT", + 2: "DELETE", + } + ReplicationMessage_Response_Operation_value = map[string]int32{ + "Unknown": 0, + "UPSERT": 1, + "DELETE": 2, + } +) + +func (x ReplicationMessage_Response_Operation) Enum() *ReplicationMessage_Response_Operation { + p := new(ReplicationMessage_Response_Operation) + *p = x + return p +} + +func (x ReplicationMessage_Response_Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReplicationMessage_Response_Operation) Descriptor() protoreflect.EnumDescriptor { + return file_proto_pbpeering_peering_proto_enumTypes[1].Descriptor() +} + +func (ReplicationMessage_Response_Operation) Type() protoreflect.EnumType { + return &file_proto_pbpeering_peering_proto_enumTypes[1] +} + +func (x ReplicationMessage_Response_Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReplicationMessage_Response_Operation.Descriptor instead. 
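These states combine with ShouldDial, defined earlier in proto/pbpeering/peering.go, to fix each cluster's role: only the side that stored PeerServerAddresses, i.e. the side that initiated with a token, dials out. A small illustration of that rule (the peering values are made up):

package sketch

import "github.com/hashicorp/consul/proto/pbpeering"

// dialingSides shows the rule from Peering.ShouldDial: the initiating side
// stored the remote server addresses, so it dials; the token-generating side
// did not, so it waits to be dialed.
func dialingSides() (dialer, acceptor bool) {
	initiator := &pbpeering.Peering{
		Name:                "alpha",
		State:               pbpeering.PeeringState_INITIAL,
		PeerServerAddresses: []string{"10.0.0.1:8300"},
	}
	generator := &pbpeering.Peering{
		Name:  "beta",
		State: pbpeering.PeeringState_INITIAL,
		// No PeerServerAddresses: this side generated the token.
	}
	return initiator.ShouldDial(), generator.ShouldDial() // true, false
}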
+func (ReplicationMessage_Response_Operation) EnumDescriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 1, 0} +} + +// Peering defines a peering relationship between two disparate Consul clusters +type Peering struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID is a datacenter-scoped UUID for the peering. + // The ID is generated when a peering is first written to the state store. + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Name is the local alias for the peering relationship. + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + // Partition is the local partition connecting to the peer. + Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"` + // State is one of the valid PeeringState values to represent the status of + // the peering relationship. + State PeeringState `protobuf:"varint,4,opt,name=State,proto3,enum=pbpeering.PeeringState" json:"State,omitempty"` + // PeerID is the ID that our peer assigned to this peering. + // This ID is to be used when dialing the peer, so that it can know who dialed it. + PeerID string `protobuf:"bytes,5,opt,name=PeerID,proto3" json:"PeerID,omitempty"` + // PeerCAPems contains all the CA certificates for the remote peer. + PeerCAPems []string `protobuf:"bytes,6,rep,name=PeerCAPems,proto3" json:"PeerCAPems,omitempty"` + // PeerServerName is the name of the remote server as it relates to TLS. + PeerServerName string `protobuf:"bytes,7,opt,name=PeerServerName,proto3" json:"PeerServerName,omitempty"` + // PeerServerAddresses contains all the connection addresses for the remote peer. + PeerServerAddresses []string `protobuf:"bytes,8,rep,name=PeerServerAddresses,proto3" json:"PeerServerAddresses,omitempty"` + // CreateIndex is the Raft index at which the Peering was created. + CreateIndex uint64 `protobuf:"varint,9,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"` + // ModifyIndex is the latest Raft index at which the Peering was modified. + ModifyIndex uint64 `protobuf:"varint,10,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"` +} + +func (x *Peering) Reset() { + *x = Peering{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Peering) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Peering) ProtoMessage() {} + +func (x *Peering) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Peering.ProtoReflect.Descriptor instead.
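The MarshalBinary and UnmarshalBinary methods generated in peering.pb.binary.go above let a Peering travel through the Raft log as opaque bytes. A quick round-trip sketch:

package sketch

import "github.com/hashicorp/consul/proto/pbpeering"

// roundTrip encodes and decodes a Peering through its
// encoding.BinaryMarshaler implementation, as Raft log application would.
func roundTrip(in *pbpeering.Peering) (*pbpeering.Peering, error) {
	b, err := in.MarshalBinary()
	if err != nil {
		return nil, err
	}
	out := &pbpeering.Peering{}
	if err := out.UnmarshalBinary(b); err != nil {
		return nil, err
	}
	return out, nil // out.GetName() == in.GetName(), and so on
}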
+func (*Peering) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{0} +} + +func (x *Peering) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Peering) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Peering) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *Peering) GetState() PeeringState { + if x != nil { + return x.State + } + return PeeringState_UNDEFINED +} + +func (x *Peering) GetPeerID() string { + if x != nil { + return x.PeerID + } + return "" +} + +func (x *Peering) GetPeerCAPems() []string { + if x != nil { + return x.PeerCAPems + } + return nil +} + +func (x *Peering) GetPeerServerName() string { + if x != nil { + return x.PeerServerName + } + return "" +} + +func (x *Peering) GetPeerServerAddresses() []string { + if x != nil { + return x.PeerServerAddresses + } + return nil +} + +func (x *Peering) GetCreateIndex() uint64 { + if x != nil { + return x.CreateIndex + } + return 0 +} + +func (x *Peering) GetModifyIndex() uint64 { + if x != nil { + return x.ModifyIndex + } + return 0 +} + +// PeeringTrustBundle holds the trust information for validating requests from a peer. +type PeeringTrustBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TrustDomain is the domain for the bundle, example.com, foo.bar.gov for example. Note that this must not have a prefix such as "spiffe://". + TrustDomain string `protobuf:"bytes,1,opt,name=TrustDomain,proto3" json:"TrustDomain,omitempty"` + // PeerName associates the trust bundle with a peer. + PeerName string `protobuf:"bytes,2,opt,name=PeerName,proto3" json:"PeerName,omitempty"` + // Partition isolates the bundle from other trust bundles in separate partitions. + Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"` + // RootPEMs holds ASN.1 DER encoded X.509 certificate data for the trust bundle. + RootPEMs []string `protobuf:"bytes,4,rep,name=RootPEMs,proto3" json:"RootPEMs,omitempty"` + // CreateIndex is the Raft index at which the trust domain was created. + CreateIndex uint64 `protobuf:"varint,5,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"` + // ModifyIndex is the latest Raft index at which the trust bundle was modified. + ModifyIndex uint64 `protobuf:"varint,6,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"` +} + +func (x *PeeringTrustBundle) Reset() { + *x = PeeringTrustBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundle) ProtoMessage() {} + +func (x *PeeringTrustBundle) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundle.ProtoReflect.Descriptor instead. 
+func (*PeeringTrustBundle) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{1} +} + +func (x *PeeringTrustBundle) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *PeeringTrustBundle) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + +func (x *PeeringTrustBundle) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringTrustBundle) GetRootPEMs() []string { + if x != nil { + return x.RootPEMs + } + return nil +} + +func (x *PeeringTrustBundle) GetCreateIndex() uint64 { + if x != nil { + return x.CreateIndex + } + return 0 +} + +func (x *PeeringTrustBundle) GetModifyIndex() uint64 { + if x != nil { + return x.ModifyIndex + } + return 0 +} + +type PeeringReadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringReadRequest) Reset() { + *x = PeeringReadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringReadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringReadRequest) ProtoMessage() {} + +func (x *PeeringReadRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringReadRequest.ProtoReflect.Descriptor instead. +func (*PeeringReadRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{2} +} + +func (x *PeeringReadRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PeeringReadRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringReadRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringReadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Peering *Peering `protobuf:"bytes,1,opt,name=Peering,proto3" json:"Peering,omitempty"` +} + +func (x *PeeringReadResponse) Reset() { + *x = PeeringReadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringReadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringReadResponse) ProtoMessage() {} + +func (x *PeeringReadResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringReadResponse.ProtoReflect.Descriptor instead. 
+func (*PeeringReadResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{3} +} + +func (x *PeeringReadResponse) GetPeering() *Peering { + if x != nil { + return x.Peering + } + return nil +} + +type PeeringListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Partition string `protobuf:"bytes,1,opt,name=Partition,proto3" json:"Partition,omitempty"` + Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringListRequest) Reset() { + *x = PeeringListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringListRequest) ProtoMessage() {} + +func (x *PeeringListRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringListRequest.ProtoReflect.Descriptor instead. +func (*PeeringListRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{4} +} + +func (x *PeeringListRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringListRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Peerings []*Peering `protobuf:"bytes,1,rep,name=Peerings,proto3" json:"Peerings,omitempty"` +} + +func (x *PeeringListResponse) Reset() { + *x = PeeringListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringListResponse) ProtoMessage() {} + +func (x *PeeringListResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringListResponse.ProtoReflect.Descriptor instead. +func (*PeeringListResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{5} +} + +func (x *PeeringListResponse) GetPeerings() []*Peering { + if x != nil { + return x.Peerings + } + return nil +} + +type PeeringWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Peering *Peering `protobuf:"bytes,1,opt,name=Peering,proto3" json:"Peering,omitempty"` + //TODO(peering): what to do with embedded write request? 
+ Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringWriteRequest) Reset() { + *x = PeeringWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringWriteRequest) ProtoMessage() {} + +func (x *PeeringWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringWriteRequest.ProtoReflect.Descriptor instead. +func (*PeeringWriteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{6} +} + +func (x *PeeringWriteRequest) GetPeering() *Peering { + if x != nil { + return x.Peering + } + return nil +} + +func (x *PeeringWriteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +// TODO(peering): Consider returning Peering if we keep this endpoint around +type PeeringWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringWriteResponse) Reset() { + *x = PeeringWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringWriteResponse) ProtoMessage() {} + +func (x *PeeringWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringWriteResponse.ProtoReflect.Descriptor instead. +func (*PeeringWriteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{7} +} + +type PeeringDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + //TODO(peering): what to do with embedded write request? 
+ Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringDeleteRequest) Reset() { + *x = PeeringDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringDeleteRequest) ProtoMessage() {} + +func (x *PeeringDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringDeleteRequest.ProtoReflect.Descriptor instead. +func (*PeeringDeleteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{8} +} + +func (x *PeeringDeleteRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PeeringDeleteRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringDeleteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringDeleteResponse) Reset() { + *x = PeeringDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringDeleteResponse) ProtoMessage() {} + +func (x *PeeringDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringDeleteResponse.ProtoReflect.Descriptor instead. +func (*PeeringDeleteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{9} +} + +type PeeringTerminateByIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (x *PeeringTerminateByIDRequest) Reset() { + *x = PeeringTerminateByIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTerminateByIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTerminateByIDRequest) ProtoMessage() {} + +func (x *PeeringTerminateByIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTerminateByIDRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringTerminateByIDRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{10} +} + +func (x *PeeringTerminateByIDRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type PeeringTerminateByIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringTerminateByIDResponse) Reset() { + *x = PeeringTerminateByIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTerminateByIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTerminateByIDResponse) ProtoMessage() {} + +func (x *PeeringTerminateByIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTerminateByIDResponse.ProtoReflect.Descriptor instead. +func (*PeeringTerminateByIDResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{11} +} + +type PeeringTrustBundleWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeeringTrustBundle *PeeringTrustBundle `protobuf:"bytes,1,opt,name=PeeringTrustBundle,proto3" json:"PeeringTrustBundle,omitempty"` + //TODO(peering): what to do with embedded write request? + Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringTrustBundleWriteRequest) Reset() { + *x = PeeringTrustBundleWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleWriteRequest) ProtoMessage() {} + +func (x *PeeringTrustBundleWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleWriteRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringTrustBundleWriteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{12} +} + +func (x *PeeringTrustBundleWriteRequest) GetPeeringTrustBundle() *PeeringTrustBundle { + if x != nil { + return x.PeeringTrustBundle + } + return nil +} + +func (x *PeeringTrustBundleWriteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringTrustBundleWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringTrustBundleWriteResponse) Reset() { + *x = PeeringTrustBundleWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleWriteResponse) ProtoMessage() {} + +func (x *PeeringTrustBundleWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleWriteResponse.ProtoReflect.Descriptor instead. +func (*PeeringTrustBundleWriteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{13} +} + +type PeeringTrustBundleDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + //TODO(peering): what to do with embedded write request? + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringTrustBundleDeleteRequest) Reset() { + *x = PeeringTrustBundleDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleDeleteRequest) ProtoMessage() {} + +func (x *PeeringTrustBundleDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringTrustBundleDeleteRequest) Descriptor() ([]byte, []int) {
+	return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *PeeringTrustBundleDeleteRequest) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *PeeringTrustBundleDeleteRequest) GetPartition() string {
+	if x != nil {
+		return x.Partition
+	}
+	return ""
+}
+
+func (x *PeeringTrustBundleDeleteRequest) GetDatacenter() string {
+	if x != nil {
+		return x.Datacenter
+	}
+	return ""
+}
+
+type PeeringTrustBundleDeleteResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *PeeringTrustBundleDeleteResponse) Reset() {
+	*x = PeeringTrustBundleDeleteResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_proto_pbpeering_peering_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PeeringTrustBundleDeleteResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeeringTrustBundleDeleteResponse) ProtoMessage() {}
+
+func (x *PeeringTrustBundleDeleteResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_proto_pbpeering_peering_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeeringTrustBundleDeleteResponse.ProtoReflect.Descriptor instead.
+func (*PeeringTrustBundleDeleteResponse) Descriptor() ([]byte, []int) {
+	return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{15}
+}
+
+type GenerateTokenRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Name of the remote peer.
+	PeerName string `protobuf:"bytes,1,opt,name=PeerName,proto3" json:"PeerName,omitempty"`
+	// Partition to be peered.
+	Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"`
+	// these are common fields required for implementing structs.RPCInfo methods
+	// that are used to forward requests
+	Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"`
+	Token      string `protobuf:"bytes,4,opt,name=Token,proto3" json:"Token,omitempty"`
+}
+
+func (x *GenerateTokenRequest) Reset() {
+	*x = GenerateTokenRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_proto_pbpeering_peering_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GenerateTokenRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GenerateTokenRequest) ProtoMessage() {}
+
+func (x *GenerateTokenRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_proto_pbpeering_peering_proto_msgTypes[16]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GenerateTokenRequest.ProtoReflect.Descriptor instead.
+func (*GenerateTokenRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{16} +} + +func (x *GenerateTokenRequest) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + +func (x *GenerateTokenRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *GenerateTokenRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +func (x *GenerateTokenRequest) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +type GenerateTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // PeeringToken is an opaque string provided to the remote peer for it to complete + // the peering initialization handshake. + PeeringToken string `protobuf:"bytes,1,opt,name=PeeringToken,proto3" json:"PeeringToken,omitempty"` +} + +func (x *GenerateTokenResponse) Reset() { + *x = GenerateTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateTokenResponse) ProtoMessage() {} + +func (x *GenerateTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateTokenResponse.ProtoReflect.Descriptor instead. +func (*GenerateTokenResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{17} +} + +func (x *GenerateTokenResponse) GetPeeringToken() string { + if x != nil { + return x.PeeringToken + } + return "" +} + +type InitiateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the remote peer. + PeerName string `protobuf:"bytes,1,opt,name=PeerName,proto3" json:"PeerName,omitempty"` + // The peering token returned from the peer's GenerateToken endpoint. + PeeringToken string `protobuf:"bytes,2,opt,name=PeeringToken,proto3" json:"PeeringToken,omitempty"` + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Token string `protobuf:"bytes,4,opt,name=Token,proto3" json:"Token,omitempty"` +} + +func (x *InitiateRequest) Reset() { + *x = InitiateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitiateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitiateRequest) ProtoMessage() {} + +func (x *InitiateRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitiateRequest.ProtoReflect.Descriptor instead. 
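+
+// Example (editor's illustrative sketch, not generated code): the two-step
+// peering handshake as exposed by PeeringService. The cluster accepting the
+// peering issues a token via GenerateToken, and the dialing cluster passes
+// that opaque token to Initiate. The connection variables and peer names
+// below are hypothetical placeholders.
+func examplePeeringHandshake(ctx context.Context, acceptorConn, dialerConn grpc.ClientConnInterface) error {
+	acceptor := NewPeeringServiceClient(acceptorConn)
+	tokenResp, err := acceptor.GenerateToken(ctx, &GenerateTokenRequest{PeerName: "cluster-02"})
+	if err != nil {
+		return err
+	}
+
+	// The PeeringToken is opaque to the dialer; it only needs to echo it back.
+	dialer := NewPeeringServiceClient(dialerConn)
+	_, err = dialer.Initiate(ctx, &InitiateRequest{
+		PeerName:     "cluster-01",
+		PeeringToken: tokenResp.PeeringToken,
+	})
+	return err
+}
+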
+func (*InitiateRequest) Descriptor() ([]byte, []int) {
+	return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *InitiateRequest) GetPeerName() string {
+	if x != nil {
+		return x.PeerName
+	}
+	return ""
+}
+
+func (x *InitiateRequest) GetPeeringToken() string {
+	if x != nil {
+		return x.PeeringToken
+	}
+	return ""
+}
+
+func (x *InitiateRequest) GetDatacenter() string {
+	if x != nil {
+		return x.Datacenter
+	}
+	return ""
+}
+
+func (x *InitiateRequest) GetToken() string {
+	if x != nil {
+		return x.Token
+	}
+	return ""
+}
+
+type InitiateResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Status is just a placeholder to avoid returning google.protobuf.Empty
+	// (and, consequently, the gogo.protobuf types it would be replaced with).
+	Status uint32 `protobuf:"varint,1,opt,name=Status,proto3" json:"Status,omitempty"`
+}
+
+func (x *InitiateResponse) Reset() {
+	*x = InitiateResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_proto_pbpeering_peering_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *InitiateResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InitiateResponse) ProtoMessage() {}
+
+func (x *InitiateResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_proto_pbpeering_peering_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InitiateResponse.ProtoReflect.Descriptor instead.
+func (*InitiateResponse) Descriptor() ([]byte, []int) {
+	return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *InitiateResponse) GetStatus() uint32 {
+	if x != nil {
+		return x.Status
+	}
+	return 0
+}
+
+type ReplicationMessage struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Payload:
+	//	*ReplicationMessage_Request_
+	//	*ReplicationMessage_Response_
+	//	*ReplicationMessage_Terminated_
+	Payload isReplicationMessage_Payload `protobuf_oneof:"Payload"`
+}
+
+func (x *ReplicationMessage) Reset() {
+	*x = ReplicationMessage{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_proto_pbpeering_peering_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReplicationMessage) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReplicationMessage) ProtoMessage() {}
+
+func (x *ReplicationMessage) ProtoReflect() protoreflect.Message {
+	mi := &file_proto_pbpeering_peering_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReplicationMessage.ProtoReflect.Descriptor instead.
+func (*ReplicationMessage) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20} +} + +func (m *ReplicationMessage) GetPayload() isReplicationMessage_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *ReplicationMessage) GetRequest() *ReplicationMessage_Request { + if x, ok := x.GetPayload().(*ReplicationMessage_Request_); ok { + return x.Request + } + return nil +} + +func (x *ReplicationMessage) GetResponse() *ReplicationMessage_Response { + if x, ok := x.GetPayload().(*ReplicationMessage_Response_); ok { + return x.Response + } + return nil +} + +func (x *ReplicationMessage) GetTerminated() *ReplicationMessage_Terminated { + if x, ok := x.GetPayload().(*ReplicationMessage_Terminated_); ok { + return x.Terminated + } + return nil +} + +type isReplicationMessage_Payload interface { + isReplicationMessage_Payload() +} + +type ReplicationMessage_Request_ struct { + Request *ReplicationMessage_Request `protobuf:"bytes,1,opt,name=request,proto3,oneof"` +} + +type ReplicationMessage_Response_ struct { + Response *ReplicationMessage_Response `protobuf:"bytes,2,opt,name=response,proto3,oneof"` +} + +type ReplicationMessage_Terminated_ struct { + Terminated *ReplicationMessage_Terminated `protobuf:"bytes,3,opt,name=terminated,proto3,oneof"` +} + +func (*ReplicationMessage_Request_) isReplicationMessage_Payload() {} + +func (*ReplicationMessage_Response_) isReplicationMessage_Payload() {} + +func (*ReplicationMessage_Terminated_) isReplicationMessage_Payload() {} + +// A Request requests to subscribe to a resource of a given type. +type ReplicationMessage_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An identifier for the peer making the request. + // This identifier is provisioned by the serving peer prior to the request from the dialing peer. + PeerID string `protobuf:"bytes,1,opt,name=PeerID,proto3" json:"PeerID,omitempty"` + // Nonce corresponding to that of the response being ACKed or NACKed. + // Initial subscription requests will have an empty nonce. + // The nonce is generated and incremented by the exporting peer. + Nonce string `protobuf:"bytes,2,opt,name=Nonce,proto3" json:"Nonce,omitempty"` + // The type URL for the resource being requested or ACK/NACKed. + ResourceURL string `protobuf:"bytes,3,opt,name=ResourceURL,proto3" json:"ResourceURL,omitempty"` + // The error if the previous response was not applied successfully. + // This field is empty in the first subscription request. + Error *pbstatus.Status `protobuf:"bytes,4,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *ReplicationMessage_Request) Reset() { + *x = ReplicationMessage_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage_Request) ProtoMessage() {} + +func (x *ReplicationMessage_Request) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage_Request.ProtoReflect.Descriptor instead. 
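+
+// Example (editor's illustrative sketch, not generated code): building the
+// Request messages exchanged on a StreamResources stream. Per the field
+// comments above, the initial subscription carries an empty Nonce, while a
+// later ACK echoes the Nonce of the response being acknowledged (leaving
+// Error nil). The peerID, resourceURL, and ackNonce values are hypothetical.
+func exampleSubscribeAndAck(peerID, resourceURL, ackNonce string) (subscribe, ack *ReplicationMessage) {
+	subscribe = &ReplicationMessage{
+		Payload: &ReplicationMessage_Request_{
+			Request: &ReplicationMessage_Request{
+				PeerID:      peerID,
+				ResourceURL: resourceURL,
+				// Nonce intentionally empty: initial subscription request.
+			},
+		},
+	}
+	ack = &ReplicationMessage{
+		Payload: &ReplicationMessage_Request_{
+			Request: &ReplicationMessage_Request{
+				PeerID:      peerID,
+				ResourceURL: resourceURL,
+				Nonce:       ackNonce, // ACK the response that carried this nonce.
+			},
+		},
+	}
+	return subscribe, ack
+}
+
+// A consumer of the stream can dispatch on the Payload oneof with a type
+// switch; the string results here are hypothetical placeholders for real
+// handling logic.
+func exampleDispatch(msg *ReplicationMessage) string {
+	switch msg.Payload.(type) {
+	case *ReplicationMessage_Request_:
+		return "request" // a (re)subscription or an ACK/NACK from the peer
+	case *ReplicationMessage_Response_:
+		return "response" // a resource upsert/delete to apply locally
+	case *ReplicationMessage_Terminated_:
+		return "terminated" // the peer deleted the peering; clean up local state
+	default:
+		return "unknown"
+	}
+}
+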
+func (*ReplicationMessage_Request) Descriptor() ([]byte, []int) {
+	return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 0}
+}
+
+func (x *ReplicationMessage_Request) GetPeerID() string {
+	if x != nil {
+		return x.PeerID
+	}
+	return ""
+}
+
+func (x *ReplicationMessage_Request) GetNonce() string {
+	if x != nil {
+		return x.Nonce
+	}
+	return ""
+}
+
+func (x *ReplicationMessage_Request) GetResourceURL() string {
+	if x != nil {
+		return x.ResourceURL
+	}
+	return ""
+}
+
+func (x *ReplicationMessage_Request) GetError() *pbstatus.Status {
+	if x != nil {
+		return x.Error
+	}
+	return nil
+}
+
+// A Response contains resources corresponding to a subscription request.
+type ReplicationMessage_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Nonce identifying a response in a stream.
+	Nonce string `protobuf:"bytes,1,opt,name=Nonce,proto3" json:"Nonce,omitempty"`
+	// The type URL of the resource being returned.
+	ResourceURL string `protobuf:"bytes,2,opt,name=ResourceURL,proto3" json:"ResourceURL,omitempty"`
+	// An identifier for the resource being returned.
+	// This could be the SPIFFE ID of the service.
+	ResourceID string `protobuf:"bytes,3,opt,name=ResourceID,proto3" json:"ResourceID,omitempty"`
+	// The resource being returned.
+	Resource *anypb.Any `protobuf:"bytes,4,opt,name=Resource,proto3" json:"Resource,omitempty"`
+	// REQUIRED. The operation to be performed in relation to the resource.
+	Operation ReplicationMessage_Response_Operation `protobuf:"varint,5,opt,name=operation,proto3,enum=pbpeering.ReplicationMessage_Response_Operation" json:"operation,omitempty"`
+}
+
+func (x *ReplicationMessage_Response) Reset() {
+	*x = ReplicationMessage_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_proto_pbpeering_peering_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReplicationMessage_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReplicationMessage_Response) ProtoMessage() {}
+
+func (x *ReplicationMessage_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_proto_pbpeering_peering_proto_msgTypes[22]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReplicationMessage_Response.ProtoReflect.Descriptor instead.
+func (*ReplicationMessage_Response) Descriptor() ([]byte, []int) {
+	return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 1}
+}
+
+func (x *ReplicationMessage_Response) GetNonce() string {
+	if x != nil {
+		return x.Nonce
+	}
+	return ""
+}
+
+func (x *ReplicationMessage_Response) GetResourceURL() string {
+	if x != nil {
+		return x.ResourceURL
+	}
+	return ""
+}
+
+func (x *ReplicationMessage_Response) GetResourceID() string {
+	if x != nil {
+		return x.ResourceID
+	}
+	return ""
+}
+
+func (x *ReplicationMessage_Response) GetResource() *anypb.Any {
+	if x != nil {
+		return x.Resource
+	}
+	return nil
+}
+
+func (x *ReplicationMessage_Response) GetOperation() ReplicationMessage_Response_Operation {
+	if x != nil {
+		return x.Operation
+	}
+	return ReplicationMessage_Response_Unknown
+}
+
+// Terminated is sent when a peering is deleted locally.
+// This message signals to the peer that they should clean up their local state about the peering.
+type ReplicationMessage_Terminated struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReplicationMessage_Terminated) Reset() { + *x = ReplicationMessage_Terminated{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage_Terminated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage_Terminated) ProtoMessage() {} + +func (x *ReplicationMessage_Terminated) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage_Terminated.ProtoReflect.Descriptor instead. +func (*ReplicationMessage_Terminated) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 2} +} + +var File_proto_pbpeering_peering_proto protoreflect.FileDescriptor + +var file_proto_pbpeering_peering_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x09, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xd0, 0x02, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x0e, + 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, + 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x17, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x41, 0x50, 0x65, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x41, 0x50, 0x65, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x30, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x50, 0x65, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 
0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x52, 0x6f, 0x6f, 0x74, + 0x50, 0x45, 0x4d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x52, 0x6f, 0x6f, 0x74, + 0x50, 0x45, 0x4d, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, + 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x66, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x22, 0x43, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x52, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, + 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x45, 0x0a, 0x13, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2e, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 
0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, + 0x22, 0x63, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, + 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x2d, 0x0a, 0x1b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x22, + 0x1e, 0x0a, 0x1c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x8f, 0x01, 0x0a, 0x1e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x12, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, + 0x72, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, + 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x73, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 
0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, + 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x01, + 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x87, 0x01, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x2a, 0x0a, + 0x10, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x9c, 0x05, 0x0a, 0x12, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x41, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 
0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0a, 0x74, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x65, 0x64, 0x1a, 0x7f, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, + 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, + 0x12, 0x24, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x96, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x4e, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x30, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x30, 0x0a, + 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, + 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x1a, + 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, + 0x07, 
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2a, 0x53, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, + 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x49, 0x54, 0x49, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10, 0x04, 0x32, 0xbf, 0x04, + 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1f, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, + 0x12, 0x1a, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, + 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x62, 0x70, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x62, 0x70, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x1d, 0x2e, + 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1d, 0x2e, 0x70, + 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, + 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_pbpeering_peering_proto_rawDescOnce sync.Once + file_proto_pbpeering_peering_proto_rawDescData = file_proto_pbpeering_peering_proto_rawDesc +) + +func file_proto_pbpeering_peering_proto_rawDescGZIP() []byte { + file_proto_pbpeering_peering_proto_rawDescOnce.Do(func() { + file_proto_pbpeering_peering_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbpeering_peering_proto_rawDescData) + }) + return file_proto_pbpeering_peering_proto_rawDescData +} + +var file_proto_pbpeering_peering_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_proto_pbpeering_peering_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_proto_pbpeering_peering_proto_goTypes = []interface{}{ + (PeeringState)(0), // 0: pbpeering.PeeringState + (ReplicationMessage_Response_Operation)(0), // 1: pbpeering.ReplicationMessage.Response.Operation + (*Peering)(nil), // 2: pbpeering.Peering + (*PeeringTrustBundle)(nil), // 3: pbpeering.PeeringTrustBundle + (*PeeringReadRequest)(nil), // 4: pbpeering.PeeringReadRequest + (*PeeringReadResponse)(nil), // 5: pbpeering.PeeringReadResponse + (*PeeringListRequest)(nil), // 6: pbpeering.PeeringListRequest + (*PeeringListResponse)(nil), // 7: pbpeering.PeeringListResponse + (*PeeringWriteRequest)(nil), // 8: pbpeering.PeeringWriteRequest + (*PeeringWriteResponse)(nil), // 9: pbpeering.PeeringWriteResponse + (*PeeringDeleteRequest)(nil), // 10: pbpeering.PeeringDeleteRequest + (*PeeringDeleteResponse)(nil), // 11: pbpeering.PeeringDeleteResponse + (*PeeringTerminateByIDRequest)(nil), // 12: pbpeering.PeeringTerminateByIDRequest + (*PeeringTerminateByIDResponse)(nil), // 13: pbpeering.PeeringTerminateByIDResponse + (*PeeringTrustBundleWriteRequest)(nil), // 14: pbpeering.PeeringTrustBundleWriteRequest + (*PeeringTrustBundleWriteResponse)(nil), // 15: pbpeering.PeeringTrustBundleWriteResponse + (*PeeringTrustBundleDeleteRequest)(nil), // 16: pbpeering.PeeringTrustBundleDeleteRequest + (*PeeringTrustBundleDeleteResponse)(nil), // 17: pbpeering.PeeringTrustBundleDeleteResponse + (*GenerateTokenRequest)(nil), // 18: pbpeering.GenerateTokenRequest + (*GenerateTokenResponse)(nil), // 19: pbpeering.GenerateTokenResponse + (*InitiateRequest)(nil), // 20: pbpeering.InitiateRequest + (*InitiateResponse)(nil), // 21: pbpeering.InitiateResponse + (*ReplicationMessage)(nil), // 22: pbpeering.ReplicationMessage + (*ReplicationMessage_Request)(nil), // 23: pbpeering.ReplicationMessage.Request + (*ReplicationMessage_Response)(nil), // 24: pbpeering.ReplicationMessage.Response + (*ReplicationMessage_Terminated)(nil), // 25: pbpeering.ReplicationMessage.Terminated + (*pbstatus.Status)(nil), 
// 26: status.Status + (*anypb.Any)(nil), // 27: google.protobuf.Any +} +var file_proto_pbpeering_peering_proto_depIdxs = []int32{ + 0, // 0: pbpeering.Peering.State:type_name -> pbpeering.PeeringState + 2, // 1: pbpeering.PeeringReadResponse.Peering:type_name -> pbpeering.Peering + 2, // 2: pbpeering.PeeringListResponse.Peerings:type_name -> pbpeering.Peering + 2, // 3: pbpeering.PeeringWriteRequest.Peering:type_name -> pbpeering.Peering + 3, // 4: pbpeering.PeeringTrustBundleWriteRequest.PeeringTrustBundle:type_name -> pbpeering.PeeringTrustBundle + 23, // 5: pbpeering.ReplicationMessage.request:type_name -> pbpeering.ReplicationMessage.Request + 24, // 6: pbpeering.ReplicationMessage.response:type_name -> pbpeering.ReplicationMessage.Response + 25, // 7: pbpeering.ReplicationMessage.terminated:type_name -> pbpeering.ReplicationMessage.Terminated + 26, // 8: pbpeering.ReplicationMessage.Request.Error:type_name -> status.Status + 27, // 9: pbpeering.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any + 1, // 10: pbpeering.ReplicationMessage.Response.operation:type_name -> pbpeering.ReplicationMessage.Response.Operation + 18, // 11: pbpeering.PeeringService.GenerateToken:input_type -> pbpeering.GenerateTokenRequest + 20, // 12: pbpeering.PeeringService.Initiate:input_type -> pbpeering.InitiateRequest + 4, // 13: pbpeering.PeeringService.PeeringRead:input_type -> pbpeering.PeeringReadRequest + 6, // 14: pbpeering.PeeringService.PeeringList:input_type -> pbpeering.PeeringListRequest + 10, // 15: pbpeering.PeeringService.PeeringDelete:input_type -> pbpeering.PeeringDeleteRequest + 8, // 16: pbpeering.PeeringService.PeeringWrite:input_type -> pbpeering.PeeringWriteRequest + 22, // 17: pbpeering.PeeringService.StreamResources:input_type -> pbpeering.ReplicationMessage + 19, // 18: pbpeering.PeeringService.GenerateToken:output_type -> pbpeering.GenerateTokenResponse + 21, // 19: pbpeering.PeeringService.Initiate:output_type -> pbpeering.InitiateResponse + 5, // 20: pbpeering.PeeringService.PeeringRead:output_type -> pbpeering.PeeringReadResponse + 7, // 21: pbpeering.PeeringService.PeeringList:output_type -> pbpeering.PeeringListResponse + 11, // 22: pbpeering.PeeringService.PeeringDelete:output_type -> pbpeering.PeeringDeleteResponse + 9, // 23: pbpeering.PeeringService.PeeringWrite:output_type -> pbpeering.PeeringWriteResponse + 22, // 24: pbpeering.PeeringService.StreamResources:output_type -> pbpeering.ReplicationMessage + 18, // [18:25] is the sub-list for method output_type + 11, // [11:18] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_proto_pbpeering_peering_proto_init() } +func file_proto_pbpeering_peering_proto_init() { + if File_proto_pbpeering_peering_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbpeering_peering_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Peering); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[2].Exporter 
= func(v interface{}, i int) interface{} { + switch v := v.(*PeeringReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringReadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTerminateByIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTerminateByIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_proto_pbpeering_peering_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenerateTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenerateTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitiateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitiateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Terminated); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proto_pbpeering_peering_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*ReplicationMessage_Request_)(nil), + (*ReplicationMessage_Response_)(nil), + (*ReplicationMessage_Terminated_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbpeering_peering_proto_rawDesc, + NumEnums: 2, + NumMessages: 24, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_pbpeering_peering_proto_goTypes, + DependencyIndexes: file_proto_pbpeering_peering_proto_depIdxs, + EnumInfos: file_proto_pbpeering_peering_proto_enumTypes, + MessageInfos: file_proto_pbpeering_peering_proto_msgTypes, + }.Build() + File_proto_pbpeering_peering_proto = out.File + file_proto_pbpeering_peering_proto_rawDesc = nil + file_proto_pbpeering_peering_proto_goTypes = nil + file_proto_pbpeering_peering_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// PeeringServiceClient is the client API for PeeringService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PeeringServiceClient interface { + GenerateToken(ctx context.Context, in *GenerateTokenRequest, opts ...grpc.CallOption) (*GenerateTokenResponse, error) + Initiate(ctx context.Context, in *InitiateRequest, opts ...grpc.CallOption) (*InitiateResponse, error) + PeeringRead(ctx context.Context, in *PeeringReadRequest, opts ...grpc.CallOption) (*PeeringReadResponse, error) + PeeringList(ctx context.Context, in *PeeringListRequest, opts ...grpc.CallOption) (*PeeringListResponse, error) + PeeringDelete(ctx context.Context, in *PeeringDeleteRequest, opts ...grpc.CallOption) (*PeeringDeleteResponse, error) + // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. + // Consider removing if we can find another way to populate state store in peering_endpoint_test.go + PeeringWrite(ctx context.Context, in *PeeringWriteRequest, opts ...grpc.CallOption) (*PeeringWriteResponse, error) + // StreamResources opens an event stream for resources to share between peers, such as services. + // Events are streamed as they happen. + StreamResources(ctx context.Context, opts ...grpc.CallOption) (PeeringService_StreamResourcesClient, error) +} + +type peeringServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewPeeringServiceClient(cc grpc.ClientConnInterface) PeeringServiceClient { + return &peeringServiceClient{cc} +} + +func (c *peeringServiceClient) GenerateToken(ctx context.Context, in *GenerateTokenRequest, opts ...grpc.CallOption) (*GenerateTokenResponse, error) { + out := new(GenerateTokenResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/GenerateToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) Initiate(ctx context.Context, in *InitiateRequest, opts ...grpc.CallOption) (*InitiateResponse, error) { + out := new(InitiateResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/Initiate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringRead(ctx context.Context, in *PeeringReadRequest, opts ...grpc.CallOption) (*PeeringReadResponse, error) { + out := new(PeeringReadResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringRead", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringList(ctx context.Context, in *PeeringListRequest, opts ...grpc.CallOption) (*PeeringListResponse, error) { + out := new(PeeringListResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringDelete(ctx context.Context, in *PeeringDeleteRequest, opts ...grpc.CallOption) (*PeeringDeleteResponse, error) { + out := new(PeeringDeleteResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringDelete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringWrite(ctx context.Context, in *PeeringWriteRequest, opts ...grpc.CallOption) (*PeeringWriteResponse, error) { + out := new(PeeringWriteResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) StreamResources(ctx context.Context, opts ...grpc.CallOption) (PeeringService_StreamResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &_PeeringService_serviceDesc.Streams[0], "/pbpeering.PeeringService/StreamResources", opts...) + if err != nil { + return nil, err + } + x := &peeringServiceStreamResourcesClient{stream} + return x, nil +} + +type PeeringService_StreamResourcesClient interface { + Send(*ReplicationMessage) error + Recv() (*ReplicationMessage, error) + grpc.ClientStream +} + +type peeringServiceStreamResourcesClient struct { + grpc.ClientStream +} + +func (x *peeringServiceStreamResourcesClient) Send(m *ReplicationMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *peeringServiceStreamResourcesClient) Recv() (*ReplicationMessage, error) { + m := new(ReplicationMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// PeeringServiceServer is the server API for PeeringService service. +type PeeringServiceServer interface { + GenerateToken(context.Context, *GenerateTokenRequest) (*GenerateTokenResponse, error) + Initiate(context.Context, *InitiateRequest) (*InitiateResponse, error) + PeeringRead(context.Context, *PeeringReadRequest) (*PeeringReadResponse, error) + PeeringList(context.Context, *PeeringListRequest) (*PeeringListResponse, error) + PeeringDelete(context.Context, *PeeringDeleteRequest) (*PeeringDeleteResponse, error) + // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. + // Consider removing if we can find another way to populate state store in peering_endpoint_test.go + PeeringWrite(context.Context, *PeeringWriteRequest) (*PeeringWriteResponse, error) + // StreamResources opens an event stream for resources to share between peers, such as services. + // Events are streamed as they happen. + StreamResources(PeeringService_StreamResourcesServer) error +} + +// UnimplementedPeeringServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedPeeringServiceServer struct { +} + +func (*UnimplementedPeeringServiceServer) GenerateToken(context.Context, *GenerateTokenRequest) (*GenerateTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateToken not implemented") +} +func (*UnimplementedPeeringServiceServer) Initiate(context.Context, *InitiateRequest) (*InitiateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initiate not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringRead(context.Context, *PeeringReadRequest) (*PeeringReadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringRead not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringList(context.Context, *PeeringListRequest) (*PeeringListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringList not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringDelete(context.Context, *PeeringDeleteRequest) (*PeeringDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringDelete not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringWrite(context.Context, *PeeringWriteRequest) (*PeeringWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringWrite not implemented") +} +func (*UnimplementedPeeringServiceServer) StreamResources(PeeringService_StreamResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamResources not implemented") +} + +func RegisterPeeringServiceServer(s *grpc.Server, srv PeeringServiceServer) { + s.RegisterService(&_PeeringService_serviceDesc, srv) +} + +func _PeeringService_GenerateToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).GenerateToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/GenerateToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).GenerateToken(ctx, req.(*GenerateTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_Initiate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitiateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).Initiate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/Initiate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).Initiate(ctx, req.(*InitiateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringRead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringRead(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringRead", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(PeeringServiceServer).PeeringRead(ctx, req.(*PeeringReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringList(ctx, req.(*PeeringListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringDelete(ctx, req.(*PeeringDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringWrite(ctx, req.(*PeeringWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_StreamResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PeeringServiceServer).StreamResources(&peeringServiceStreamResourcesServer{stream}) +} + +type PeeringService_StreamResourcesServer interface { + Send(*ReplicationMessage) error + Recv() (*ReplicationMessage, error) + grpc.ServerStream +} + +type peeringServiceStreamResourcesServer struct { + grpc.ServerStream +} + +func (x *peeringServiceStreamResourcesServer) Send(m *ReplicationMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *peeringServiceStreamResourcesServer) Recv() (*ReplicationMessage, error) { + m := new(ReplicationMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _PeeringService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pbpeering.PeeringService", + HandlerType: (*PeeringServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GenerateToken", + Handler: _PeeringService_GenerateToken_Handler, + }, + { + MethodName: "Initiate", + Handler: _PeeringService_Initiate_Handler, + }, + { + MethodName: "PeeringRead", + Handler: _PeeringService_PeeringRead_Handler, + }, + { + MethodName: "PeeringList", + Handler: _PeeringService_PeeringList_Handler, + }, + { + MethodName: "PeeringDelete", + Handler: _PeeringService_PeeringDelete_Handler, + }, + { + MethodName: "PeeringWrite", + 
Handler: _PeeringService_PeeringWrite_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamResources", + Handler: _PeeringService_StreamResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "proto/pbpeering/peering.proto", +} diff --git a/proto/pbpeering/peering.proto b/proto/pbpeering/peering.proto new file mode 100644 index 000000000..e0c126caf --- /dev/null +++ b/proto/pbpeering/peering.proto @@ -0,0 +1,283 @@ +syntax = "proto3"; + +package pbpeering; + +option go_package = "github.com/hashicorp/consul/proto/pbpeering"; + +import "google/protobuf/any.proto"; + +// TODO(peering): Handle this some other way +import "proto/pbstatus/status.proto"; + +// PeeringService handles operations for establishing peering relationships +// between disparate Consul clusters. +service PeeringService { + rpc GenerateToken(GenerateTokenRequest) returns (GenerateTokenResponse); + rpc Initiate(InitiateRequest) returns (InitiateResponse); + rpc PeeringRead(PeeringReadRequest) returns (PeeringReadResponse); + rpc PeeringList(PeeringListRequest) returns (PeeringListResponse); + rpc PeeringDelete(PeeringDeleteRequest) returns (PeeringDeleteResponse); + + // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. + // Consider removing if we can find another way to populate state store in peering_endpoint_test.go + rpc PeeringWrite(PeeringWriteRequest) returns (PeeringWriteResponse); + + // StreamResources opens an event stream for resources to share between peers, such as services. + // Events are streamed as they happen. + rpc StreamResources(stream ReplicationMessage) returns (stream ReplicationMessage); +} + +// PeeringState enumerates all the states a peering can be in +enum PeeringState { + // Undefined represents an unset value for PeeringState during + // writes. + UNDEFINED = 0; + + // Initial means a Peering has been initialized and is awaiting + // acknowledgement from a remote peer. + INITIAL = 1; + + // Active means that the peering connection is active and healthy. + ACTIVE = 2; + + // Failing means the peering connection has been interrupted but has not yet + // been terminated. + FAILING = 3; + + // Terminated means the peering relationship has been removed. + TERMINATED = 4; +} + +// Peering defines a peering relationship between two disparate Consul clusters +message Peering { + // ID is a datacenter-scoped UUID for the peering. + // The ID is generated when a peering is first written to the state store. + string ID = 1; + + // Name is the local alias for the peering relationship. + string Name = 2; + + // Partition is the local partition connecting to the peer. + string Partition = 3; + + // State is one of the valid PeeringState values to represent the status of + // the peering relationship. + PeeringState State = 4; + + // PeerID is the ID that our peer assigned to this peering. + // This ID is to be used when dialing the peer, so that it can know who dialed it. + string PeerID = 5; + + // PeerCAPems contains all the CA certificates for the remote peer. + repeated string PeerCAPems = 6; + + // PeerServerName is the name of the remote server as it relates to TLS. + string PeerServerName = 7; + + // PeerServerAddresses contains all the connection addresses for the remote peer. + repeated string PeerServerAddresses = 8; + + // CreateIndex is the Raft index at which the Peering was created. + uint64 CreateIndex = 9; + + // ModifyIndex is the latest Raft index at which the Peering was modified. + uint64 ModifyIndex = 10; +}
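The Peering message above is what PeeringRead and PeeringList return. As a minimal sketch of consuming it through the generated Go client shown earlier in this patch — the gRPC address is a placeholder and TLS/ACL setup is omitted — listing peerings and their states might look like:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func main() {
	ctx := context.Background()

	// Placeholder address for a Consul server's gRPC port.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:8502", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pbpeering.NewPeeringServiceClient(conn)

	// List every peering known to this cluster and report its PeeringState.
	resp, err := client.PeeringList(ctx, &pbpeering.PeeringListRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.Peerings {
		fmt.Printf("peering %q: state=%s\n", p.Name, p.State)
	}
}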
+ +// PeeringTrustBundle holds the trust information for validating requests from a peer. +message PeeringTrustBundle { + // TrustDomain is the domain for the bundle (for example, example.com or foo.bar.gov). Note that this must not have a prefix such as "spiffe://". + string TrustDomain = 1; + + // PeerName associates the trust bundle with a peer. + string PeerName = 2; + + // Partition isolates the bundle from other trust bundles in separate partitions. + string Partition = 3; + + // RootPEMs holds ASN.1 DER encoded X.509 certificate data for the trust bundle. + repeated string RootPEMs = 4; + + // CreateIndex is the Raft index at which the trust bundle was created. + uint64 CreateIndex = 5; + + // ModifyIndex is the latest Raft index at which the trust bundle was modified. + uint64 ModifyIndex = 6; +} + +message PeeringReadRequest { + string Name = 1; + string Partition = 2; + + string Datacenter = 3; + + //TODO(peering) query metadata +} + +message PeeringReadResponse { + Peering Peering = 1; + + //TODO(peering) query metadata +} + +message PeeringListRequest { + string Partition = 1; + + string Datacenter = 2; + + //TODO(peering) query metadata +} + +message PeeringListResponse { + repeated Peering Peerings = 1; + + //TODO(peering) query metadata +} + +message PeeringWriteRequest { + Peering Peering = 1; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 2; +} + +// TODO(peering): Consider returning Peering if we keep this endpoint around +message PeeringWriteResponse{} + +message PeeringDeleteRequest { + string Name = 1; + + string Partition = 2; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 3; +} + +message PeeringDeleteResponse {} + +message PeeringTerminateByIDRequest { + string ID = 1; +} + +message PeeringTerminateByIDResponse {} + +message PeeringTrustBundleWriteRequest { + PeeringTrustBundle PeeringTrustBundle = 1; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 2; +} + +message PeeringTrustBundleWriteResponse{} + +message PeeringTrustBundleDeleteRequest { + string Name = 1; + + string Partition = 2; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 3; +} + +message PeeringTrustBundleDeleteResponse{} + +message GenerateTokenRequest { + // Name of the remote peer. + string PeerName = 1; + + // Partition to be peered. + string Partition = 2; + + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + string Datacenter = 3; + string Token = 4; +} + +message GenerateTokenResponse { + // PeeringToken is an opaque string provided to the remote peer for it to complete + // the peering initialization handshake. + string PeeringToken = 1; +} + +message InitiateRequest { + // Name of the remote peer. + string PeerName = 1; + + // The peering token returned from the peer's GenerateToken endpoint. + string PeeringToken = 2; + + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + string Datacenter = 3; + string Token = 4; +}
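GenerateTokenRequest and InitiateRequest are the two halves of the handshake: the accepting cluster mints an opaque PeeringToken, which the dialing cluster then presents via Initiate. A sketch of driving both steps with the generated client, assuming two reachable clusters whose addresses and peer names are invented here for illustration:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func main() {
	ctx := context.Background()

	// Step 1: ask the accepting cluster to mint a peering token.
	connA, err := grpc.DialContext(ctx, "cluster-a:8502", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer connA.Close()

	tokenResp, err := pbpeering.NewPeeringServiceClient(connA).GenerateToken(ctx,
		&pbpeering.GenerateTokenRequest{
			PeerName: "cluster-b", // local alias the acceptor will use for the dialer
		})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: hand the opaque token to the dialing cluster, which completes
	// the handshake with Initiate.
	connB, err := grpc.DialContext(ctx, "cluster-b:8502", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer connB.Close()

	_, err = pbpeering.NewPeeringServiceClient(connB).Initiate(ctx,
		&pbpeering.InitiateRequest{
			PeerName:     "cluster-a",
			PeeringToken: tokenResp.PeeringToken,
		})
	if err != nil {
		log.Fatal(err)
	}
}

After Initiate succeeds, the dialer opens StreamResources and sends an initial ReplicationMessage.Request with an empty Nonce and the desired type URL (see TypeURLService in proto/pbpeering/types.go below); later Requests ACK or NACK each Response by echoing its Nonce.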
+ +message InitiateResponse { + // this is just a placeholder to avoid returning google.protobuf.Empty + // (and consequently the gogo.protobuf.types it would be replaced with) + uint32 Status = 1; +} + +message ReplicationMessage { + oneof Payload { + Request request = 1; + Response response = 2; + Terminated terminated = 3; + } + + // A Request requests to subscribe to a resource of a given type. + message Request { + // An identifier for the peer making the request. + // This identifier is provisioned by the serving peer prior to the request from the dialing peer. + string PeerID = 1; + + // Nonce corresponding to that of the response being ACKed or NACKed. + // Initial subscription requests will have an empty nonce. + // The nonce is generated and incremented by the exporting peer. + string Nonce = 2; + + // The type URL for the resource being requested or ACK/NACKed. + string ResourceURL = 3; + + // The error if the previous response was not applied successfully. + // This field is empty in the first subscription request. + status.Status Error = 4; + } + + // A Response contains resources corresponding to a subscription request. + message Response { + // Nonce identifying a response in a stream. + string Nonce = 1; + + // The type URL of the resource being returned. + string ResourceURL = 2; + + // An identifier for the resource being returned. + // This could be the SPIFFE ID of the service. + string ResourceID = 3; + + // The resource being returned. + google.protobuf.Any Resource = 4; + + // Operation enumerates supported operations for replicated resources. + enum Operation { + Unknown = 0; + + // UPSERT represents a create or update event. + UPSERT = 1; + + // DELETE indicates the resource should be deleted. + // In DELETE operations no Resource will be returned. + // Deletion by an importing peer must be done with the type URL and ID. + DELETE = 2; + } + + // REQUIRED. The operation to be performed in relation to the resource. + Operation operation = 5; + } + + // Terminated is sent when a peering is deleted locally. + // This message signals to the peer that they should clean up their local state about the peering.
+ message Terminated {} +} diff --git a/proto/pbpeering/peering_oss.go b/proto/pbpeering/peering_oss.go new file mode 100644 index 000000000..d5e5b4a89 --- /dev/null +++ b/proto/pbpeering/peering_oss.go @@ -0,0 +1,16 @@ +//go:build !consulent +// +build !consulent + +package pbpeering + +func (r *GenerateTokenRequest) PartitionOrDefault() string { + return "" +} + +func (p *Peering) PartitionOrDefault() string { + return "" +} + +func (ptb *PeeringTrustBundle) PartitionOrDefault() string { + return "" +} diff --git a/proto/pbpeering/types.go b/proto/pbpeering/types.go new file mode 100644 index 000000000..3e6b092e2 --- /dev/null +++ b/proto/pbpeering/types.go @@ -0,0 +1,5 @@ +package pbpeering + +const ( + TypeURLService = "type.googleapis.com/consul.api.Service" +) diff --git a/proto/pbservice/healthcheck.gen.go b/proto/pbservice/healthcheck.gen.go index a38fd30c2..4eef24bef 100644 --- a/proto/pbservice/healthcheck.gen.go +++ b/proto/pbservice/healthcheck.gen.go @@ -93,6 +93,7 @@ func HealthCheckToStructs(s *HealthCheck, t *structs.HealthCheck) { t.Interval = s.Interval t.Timeout = s.Timeout t.ExposedPort = int(s.ExposedPort) + t.PeerName = s.PeerName if s.Definition != nil { HealthCheckDefinitionToStructs(s.Definition, &t.Definition) } @@ -116,6 +117,7 @@ func HealthCheckFromStructs(t *structs.HealthCheck, s *HealthCheck) { s.Interval = t.Interval s.Timeout = t.Timeout s.ExposedPort = int32(t.ExposedPort) + s.PeerName = t.PeerName { var x HealthCheckDefinition HealthCheckDefinitionFromStructs(&t.Definition, &x) diff --git a/proto/pbservice/healthcheck.pb.go b/proto/pbservice/healthcheck.pb.go index d28ed185c..3620a1aa2 100644 --- a/proto/pbservice/healthcheck.pb.go +++ b/proto/pbservice/healthcheck.pb.go @@ -59,6 +59,7 @@ type HealthCheck struct { ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` Timeout string `protobuf:"bytes,16,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + PeerName string `protobuf:"bytes,17,opt,name=PeerName,proto3" json:"PeerName,omitempty"` } func (x *HealthCheck) Reset() { @@ -205,6 +206,13 @@ func (x *HealthCheck) GetTimeout() string { return "" } +func (x *HealthCheck) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + type HeaderValue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -802,7 +810,7 @@ var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x04, 0x0a, 0x0b, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x04, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, @@ -836,149 +844,151 @@ var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 
0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x22, 0x23, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x07, 0x0a, 0x15, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, - 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x12, 0x44, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x23, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x07, 0x0a, 0x15, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, + 0x54, 
0x54, 0x50, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, + 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, + 0x44, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, + 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x33, 0x0a, 0x07, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, + 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, + 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 
0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, 0x49, + 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, + 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, + 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, + 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, 0x52, + 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, + 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, 0x51, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, + 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, + 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x38, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 
0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, + 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, + 0x50, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, + 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, + 0x49, 0x4e, 0x47, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, + 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, + 0x53, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, + 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, + 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, + 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, + 0x4c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x75, 
0x63, 0x63, 0x65, + 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x15, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x36, 0x0a, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x16, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x47, 0x52, 0x50, 0x43, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x47, 0x52, 0x50, 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, - 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, - 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, - 0x65, 0x54, 0x4c, 0x53, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, - 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, - 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x1c, 0x0a, 0x09, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, - 0x69, 0x61, 
0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2b, - 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, 0x51, 0x0a, 0x0b, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd4, - 0x09, 0x0a, 0x09, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x38, 0x0a, 0x06, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, - 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, - 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, - 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, - 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, - 0x32, 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, - 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, - 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, - 0x43, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, - 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, - 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, - 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, - 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, - 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, - 0x34, 0x0a, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, - 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, - 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, - 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, - 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, 0x18, 0x18, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, - 0x7a, 0x65, 0x1a, 0x51, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x51, + 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/pbservice/healthcheck.proto b/proto/pbservice/healthcheck.proto index 67629ba98..afda5dc25 100644 --- a/proto/pbservice/healthcheck.proto +++ b/proto/pbservice/healthcheck.proto @@ -40,6 +40,7 @@ message HealthCheck { string Interval = 15; string Timeout = 16; + string PeerName = 17; } message HeaderValue { diff --git a/proto/pbservice/node.gen.go b/proto/pbservice/node.gen.go index cadf2c7e9..f231ea836 100644 --- a/proto/pbservice/node.gen.go +++ b/proto/pbservice/node.gen.go @@ -13,6 +13,7 @@ func NodeToStructs(s *Node, t *structs.Node) { t.Address = s.Address t.Datacenter = s.Datacenter t.Partition = s.Partition 
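+ // PeerName is new in this change; copying it in both directions keeps the + // proto and structs forms in sync about which peer, if any, a node was imported from.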
+ t.PeerName = s.PeerName t.TaggedAddresses = s.TaggedAddresses t.Meta = s.Meta t.RaftIndex = RaftIndexToStructs(s.RaftIndex) @@ -26,6 +27,7 @@ func NodeFromStructs(t *structs.Node, s *Node) { s.Address = t.Address s.Datacenter = t.Datacenter s.Partition = t.Partition + s.PeerName = t.PeerName s.TaggedAddresses = t.TaggedAddresses s.Meta = t.Meta s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) @@ -53,6 +55,7 @@ func NodeServiceToStructs(s *NodeService, t *structs.NodeService) { } t.LocallyRegisteredAsSidecar = s.LocallyRegisteredAsSidecar t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta) + t.PeerName = s.PeerName t.RaftIndex = RaftIndexToStructs(s.RaftIndex) } func NodeServiceFromStructs(t *structs.NodeService, s *NodeService) { @@ -82,5 +85,6 @@ func NodeServiceFromStructs(t *structs.NodeService, s *NodeService) { } s.LocallyRegisteredAsSidecar = t.LocallyRegisteredAsSidecar s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta) + s.PeerName = t.PeerName s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) } diff --git a/proto/pbservice/node.pb.binary.go b/proto/pbservice/node.pb.binary.go index b7533436f..f1e2bec68 100644 --- a/proto/pbservice/node.pb.binary.go +++ b/proto/pbservice/node.pb.binary.go @@ -7,6 +7,16 @@ import ( "github.com/golang/protobuf/proto" ) +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *IndexedCheckServiceNodes) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *IndexedCheckServiceNodes) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + // MarshalBinary implements encoding.BinaryMarshaler func (msg *CheckServiceNode) MarshalBinary() ([]byte, error) { return proto.Marshal(msg) diff --git a/proto/pbservice/node.pb.go b/proto/pbservice/node.pb.go index 44340c9aa..1e76b0e0d 100644 --- a/proto/pbservice/node.pb.go +++ b/proto/pbservice/node.pb.go @@ -26,6 +26,62 @@ const ( // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 +// IndexedCheckServiceNodes is used to return multiple instances for a given service. +type IndexedCheckServiceNodes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` + Nodes []*CheckServiceNode `protobuf:"bytes,2,rep,name=Nodes,proto3" json:"Nodes,omitempty"` +} + +func (x *IndexedCheckServiceNodes) Reset() { + *x = IndexedCheckServiceNodes{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IndexedCheckServiceNodes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IndexedCheckServiceNodes) ProtoMessage() {} + +func (x *IndexedCheckServiceNodes) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IndexedCheckServiceNodes.ProtoReflect.Descriptor instead. 
+func (*IndexedCheckServiceNodes) Descriptor() ([]byte, []int) { + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{0} +} + +func (x *IndexedCheckServiceNodes) GetIndex() uint64 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *IndexedCheckServiceNodes) GetNodes() []*CheckServiceNode { + if x != nil { + return x.Nodes + } + return nil +} + // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. type CheckServiceNode struct { @@ -41,7 +97,7 @@ type CheckServiceNode struct { func (x *CheckServiceNode) Reset() { *x = CheckServiceNode{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbservice_node_proto_msgTypes[0] + mi := &file_proto_pbservice_node_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -54,7 +110,7 @@ func (x *CheckServiceNode) String() string { func (*CheckServiceNode) ProtoMessage() {} func (x *CheckServiceNode) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbservice_node_proto_msgTypes[0] + mi := &file_proto_pbservice_node_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -67,7 +123,7 @@ func (x *CheckServiceNode) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckServiceNode.ProtoReflect.Descriptor instead. func (*CheckServiceNode) Descriptor() ([]byte, []int) { - return file_proto_pbservice_node_proto_rawDescGZIP(), []int{0} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{1} } func (x *CheckServiceNode) GetNode() *Node { @@ -107,6 +163,7 @@ type Node struct { ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` Partition string `protobuf:"bytes,8,opt,name=Partition,proto3" json:"Partition,omitempty"` + PeerName string `protobuf:"bytes,9,opt,name=PeerName,proto3" json:"PeerName,omitempty"` Address string `protobuf:"bytes,3,opt,name=Address,proto3" json:"Address,omitempty"` Datacenter string `protobuf:"bytes,4,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` TaggedAddresses map[string]string `protobuf:"bytes,5,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -118,7 +175,7 @@ type Node struct { func (x *Node) Reset() { *x = Node{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbservice_node_proto_msgTypes[1] + mi := &file_proto_pbservice_node_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -131,7 +188,7 @@ func (x *Node) String() string { func (*Node) ProtoMessage() {} func (x *Node) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbservice_node_proto_msgTypes[1] + mi := &file_proto_pbservice_node_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -144,7 +201,7 @@ func (x *Node) ProtoReflect() protoreflect.Message { // Deprecated: Use Node.ProtoReflect.Descriptor instead. 
func (*Node) Descriptor() ([]byte, []int) { - return file_proto_pbservice_node_proto_rawDescGZIP(), []int{1} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{2} } func (x *Node) GetID() string { @@ -168,6 +225,13 @@ func (x *Node) GetPartition() string { return "" } +func (x *Node) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + func (x *Node) GetAddress() string { if x != nil { return x.Address @@ -267,6 +331,7 @@ type NodeService struct { LocallyRegisteredAsSidecar bool `protobuf:"varint,13,opt,name=LocallyRegisteredAsSidecar,proto3" json:"LocallyRegisteredAsSidecar,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` + PeerName string `protobuf:"bytes,18,opt,name=PeerName,proto3" json:"PeerName,omitempty"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` } @@ -274,7 +339,7 @@ type NodeService struct { func (x *NodeService) Reset() { *x = NodeService{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbservice_node_proto_msgTypes[2] + mi := &file_proto_pbservice_node_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -287,7 +352,7 @@ func (x *NodeService) String() string { func (*NodeService) ProtoMessage() {} func (x *NodeService) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbservice_node_proto_msgTypes[2] + mi := &file_proto_pbservice_node_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -300,7 +365,7 @@ func (x *NodeService) ProtoReflect() protoreflect.Message { // Deprecated: Use NodeService.ProtoReflect.Descriptor instead. 
func (*NodeService) Descriptor() ([]byte, []int) { - return file_proto_pbservice_node_proto_rawDescGZIP(), []int{2} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{3} } func (x *NodeService) GetKind() string { @@ -408,6 +473,13 @@ func (x *NodeService) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { return nil } +func (x *NodeService) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + func (x *NodeService) GetRaftIndex() *pbcommon.RaftIndex { if x != nil { return x.RaftIndex @@ -426,100 +498,110 @@ var file_proto_pbservice_node_proto_rawDesc = []byte{ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x99, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x4e, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, - 0x12, 0x30, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x22, 0xaf, 0x03, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, - 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, - 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, - 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, - 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, - 0x64, 0x65, 0x78, 
0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, - 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, - 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, - 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc9, 0x06, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x55, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x2c, 0x0a, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x52, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x12, - 0x2c, 0x0a, 0x11, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x33, 0x0a, - 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, 0x1a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, 0x53, 0x69, - 0x64, 0x65, 0x63, 0x61, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, - 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x3e, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x18, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x31, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x10, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x23, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0xcb, 0x03, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 
0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, + 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, + 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, + 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, - 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x5d, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, + 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe5, 0x06, 0x0a, 
0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, + 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x52, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, + 0x12, 0x2c, 0x0a, 0x11, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x33, + 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, 0x1a, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, 0x53, + 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, + 0x73, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x3e, 0x0a, 0x0e, 0x45, 
0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x5d, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, + 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -534,43 +616,45 @@ func file_proto_pbservice_node_proto_rawDescGZIP() []byte { return file_proto_pbservice_node_proto_rawDescData } -var file_proto_pbservice_node_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_proto_pbservice_node_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_proto_pbservice_node_proto_goTypes = []interface{}{ - (*CheckServiceNode)(nil), // 0: pbservice.CheckServiceNode - (*Node)(nil), // 1: pbservice.Node - (*NodeService)(nil), // 2: pbservice.NodeService - nil, // 3: pbservice.Node.TaggedAddressesEntry - nil, // 4: pbservice.Node.MetaEntry - nil, // 5: pbservice.NodeService.TaggedAddressesEntry - nil, // 6: pbservice.NodeService.MetaEntry - (*HealthCheck)(nil), // 7: pbservice.HealthCheck - (*pbcommon.RaftIndex)(nil), // 8: common.RaftIndex - (*Weights)(nil), // 9: pbservice.Weights - (*ConnectProxyConfig)(nil), // 10: pbservice.ConnectProxyConfig - (*ServiceConnect)(nil), // 11: pbservice.ServiceConnect - (*pbcommon.EnterpriseMeta)(nil), // 12: common.EnterpriseMeta - (*ServiceAddress)(nil), // 13: pbservice.ServiceAddress + (*IndexedCheckServiceNodes)(nil), // 0: pbservice.IndexedCheckServiceNodes + (*CheckServiceNode)(nil), // 1: pbservice.CheckServiceNode + (*Node)(nil), // 2: pbservice.Node + (*NodeService)(nil), // 3: pbservice.NodeService + nil, // 4: pbservice.Node.TaggedAddressesEntry + nil, // 5: pbservice.Node.MetaEntry + nil, // 6: 
pbservice.NodeService.TaggedAddressesEntry + nil, // 7: pbservice.NodeService.MetaEntry + (*HealthCheck)(nil), // 8: pbservice.HealthCheck + (*pbcommon.RaftIndex)(nil), // 9: common.RaftIndex + (*Weights)(nil), // 10: pbservice.Weights + (*ConnectProxyConfig)(nil), // 11: pbservice.ConnectProxyConfig + (*ServiceConnect)(nil), // 12: pbservice.ServiceConnect + (*pbcommon.EnterpriseMeta)(nil), // 13: common.EnterpriseMeta + (*ServiceAddress)(nil), // 14: pbservice.ServiceAddress } var file_proto_pbservice_node_proto_depIdxs = []int32{ - 1, // 0: pbservice.CheckServiceNode.Node:type_name -> pbservice.Node - 2, // 1: pbservice.CheckServiceNode.Service:type_name -> pbservice.NodeService - 7, // 2: pbservice.CheckServiceNode.Checks:type_name -> pbservice.HealthCheck - 3, // 3: pbservice.Node.TaggedAddresses:type_name -> pbservice.Node.TaggedAddressesEntry - 4, // 4: pbservice.Node.Meta:type_name -> pbservice.Node.MetaEntry - 8, // 5: pbservice.Node.RaftIndex:type_name -> common.RaftIndex - 5, // 6: pbservice.NodeService.TaggedAddresses:type_name -> pbservice.NodeService.TaggedAddressesEntry - 6, // 7: pbservice.NodeService.Meta:type_name -> pbservice.NodeService.MetaEntry - 9, // 8: pbservice.NodeService.Weights:type_name -> pbservice.Weights - 10, // 9: pbservice.NodeService.Proxy:type_name -> pbservice.ConnectProxyConfig - 11, // 10: pbservice.NodeService.Connect:type_name -> pbservice.ServiceConnect - 12, // 11: pbservice.NodeService.EnterpriseMeta:type_name -> common.EnterpriseMeta - 8, // 12: pbservice.NodeService.RaftIndex:type_name -> common.RaftIndex - 13, // 13: pbservice.NodeService.TaggedAddressesEntry.value:type_name -> pbservice.ServiceAddress - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 1, // 0: pbservice.IndexedCheckServiceNodes.Nodes:type_name -> pbservice.CheckServiceNode + 2, // 1: pbservice.CheckServiceNode.Node:type_name -> pbservice.Node + 3, // 2: pbservice.CheckServiceNode.Service:type_name -> pbservice.NodeService + 8, // 3: pbservice.CheckServiceNode.Checks:type_name -> pbservice.HealthCheck + 4, // 4: pbservice.Node.TaggedAddresses:type_name -> pbservice.Node.TaggedAddressesEntry + 5, // 5: pbservice.Node.Meta:type_name -> pbservice.Node.MetaEntry + 9, // 6: pbservice.Node.RaftIndex:type_name -> common.RaftIndex + 6, // 7: pbservice.NodeService.TaggedAddresses:type_name -> pbservice.NodeService.TaggedAddressesEntry + 7, // 8: pbservice.NodeService.Meta:type_name -> pbservice.NodeService.MetaEntry + 10, // 9: pbservice.NodeService.Weights:type_name -> pbservice.Weights + 11, // 10: pbservice.NodeService.Proxy:type_name -> pbservice.ConnectProxyConfig + 12, // 11: pbservice.NodeService.Connect:type_name -> pbservice.ServiceConnect + 13, // 12: pbservice.NodeService.EnterpriseMeta:type_name -> common.EnterpriseMeta + 9, // 13: pbservice.NodeService.RaftIndex:type_name -> common.RaftIndex + 14, // 14: pbservice.NodeService.TaggedAddressesEntry.value:type_name -> pbservice.ServiceAddress + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_proto_pbservice_node_proto_init() } @@ -582,7 +666,7 @@ func 
file_proto_pbservice_node_proto_init() { file_proto_pbservice_service_proto_init() if !protoimpl.UnsafeEnabled { file_proto_pbservice_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckServiceNode); i { + switch v := v.(*IndexedCheckServiceNodes); i { case 0: return &v.state case 1: @@ -594,7 +678,7 @@ func file_proto_pbservice_node_proto_init() { } } file_proto_pbservice_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Node); i { + switch v := v.(*CheckServiceNode); i { case 0: return &v.state case 1: @@ -606,6 +690,18 @@ func file_proto_pbservice_node_proto_init() { } } file_proto_pbservice_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_node_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*NodeService); i { case 0: return &v.state @@ -624,7 +720,7 @@ func file_proto_pbservice_node_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_pbservice_node_proto_rawDesc, NumEnums: 0, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/pbservice/node.proto b/proto/pbservice/node.proto index 4b8389352..eb55ac2e3 100644 --- a/proto/pbservice/node.proto +++ b/proto/pbservice/node.proto @@ -8,6 +8,12 @@ import "proto/pbcommon/common.proto"; import "proto/pbservice/healthcheck.proto"; import "proto/pbservice/service.proto"; +// IndexedCheckServiceNodes is used to return multiple instances for a given service. +message IndexedCheckServiceNodes { + uint64 Index = 1; + repeated CheckServiceNode Nodes = 2; +} + // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. message CheckServiceNode { @@ -29,6 +35,7 @@ message Node { string Node = 2; string Partition = 8; + string PeerName = 9; string Address = 3; string Datacenter = 4; map<string, string> TaggedAddresses = 5; @@ -105,6 +112,8 @@ message NodeService { // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs common.EnterpriseMeta EnterpriseMeta = 16; + string PeerName = 18; + // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs common.RaftIndex RaftIndex = 14; } diff --git a/proto/pbstatus/status.pb.binary.go b/proto/pbstatus/status.pb.binary.go new file mode 100644 index 000000000..f1dcfbf66 --- /dev/null +++ b/proto/pbstatus/status.pb.binary.go @@ -0,0 +1,18 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: proto/pbstatus/status.proto + +package pbstatus + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *Status) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *Status) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbstatus/status.pb.go b/proto/pbstatus/status.pb.go new file mode 100644 index 000000000..0b56e62b6 --- /dev/null +++ b/proto/pbstatus/status.pb.go @@ -0,0 +1,204 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto/pbstatus/status.proto + +package pbstatus + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbstatus_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbstatus_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
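// A hypothetical construction sketch for the vendored Status message; the
// newExampleStatus helper is illustrative only and not part of this file.
// Code carries a google.rpc.Code value, Message a developer-facing string,
// and Details arbitrary protobuf payloads wrapped in anypb.Any.
func newExampleStatus(details ...*anypb.Any) *Status {
	return &Status{
		Code:    5, // the google.rpc.Code value for NOT_FOUND
		Message: "peering not found",
		Details: details,
	}
}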
+func (*Status) Descriptor() ([]byte, []int) { + return file_proto_pbstatus_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_proto_pbstatus_status_proto protoreflect.FileDescriptor + +var file_proto_pbstatus_status_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x54, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_pbstatus_status_proto_rawDescOnce sync.Once + file_proto_pbstatus_status_proto_rawDescData = file_proto_pbstatus_status_proto_rawDesc +) + +func file_proto_pbstatus_status_proto_rawDescGZIP() []byte { + file_proto_pbstatus_status_proto_rawDescOnce.Do(func() { + file_proto_pbstatus_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbstatus_status_proto_rawDescData) + }) + return file_proto_pbstatus_status_proto_rawDescData +} + +var file_proto_pbstatus_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_proto_pbstatus_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: status.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_proto_pbstatus_status_proto_depIdxs = []int32{ + 1, // 0: status.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_pbstatus_status_proto_init() } +func file_proto_pbstatus_status_proto_init() { + if File_proto_pbstatus_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbstatus_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbstatus_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbstatus_status_proto_goTypes, + DependencyIndexes: file_proto_pbstatus_status_proto_depIdxs, + MessageInfos: file_proto_pbstatus_status_proto_msgTypes, + }.Build() + File_proto_pbstatus_status_proto = out.File + file_proto_pbstatus_status_proto_rawDesc = nil + file_proto_pbstatus_status_proto_goTypes = nil + file_proto_pbstatus_status_proto_depIdxs = nil +} diff --git a/proto/pbstatus/status.proto b/proto/pbstatus/status.proto new file mode 100644 index 000000000..eefc29f62 --- /dev/null +++ b/proto/pbstatus/status.proto @@ -0,0 +1,47 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package status; +option go_package = "github.com/hashicorp/consul/proto/pbstatus"; + +import "google/protobuf/any.proto"; + +option cc_enable_arenas = true; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} \ No newline at end of file diff --git a/proto/pbsubscribe/subscribe.pb.go b/proto/pbsubscribe/subscribe.pb.go index 851991ed0..00245d024 100644 --- a/proto/pbsubscribe/subscribe.pb.go +++ b/proto/pbsubscribe/subscribe.pb.go @@ -170,6 +170,8 @@ type SubscribeRequest struct { // // Partition is an enterprise-only feature. 
Partition string `protobuf:"bytes,7,opt,name=Partition,proto3" json:"Partition,omitempty"` + // TODO(peering): docs + PeerName string `protobuf:"bytes,8,opt,name=PeerName,proto3" json:"PeerName,omitempty"` } func (x *SubscribeRequest) Reset() { @@ -253,6 +255,13 @@ func (x *SubscribeRequest) GetPartition() string { return "" } +func (x *SubscribeRequest) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + // Event describes a streaming update on a subscription. Events are used both to // describe the current "snapshot" of the result as well as ongoing mutations to // that snapshot. @@ -501,7 +510,7 @@ var file_proto_pbsubscribe_subscribe_proto_rawDesc = []byte{ 0x69, 0x62, 0x65, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x1a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x01, 0x0a, 0x10, 0x53, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, @@ -515,51 +524,53 @@ var file_proto_pbsubscribe_subscribe_proto_rawDesc = []byte{ 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x85, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x12, 0x26, 0x0a, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x45, 0x6e, 0x64, 0x4f, - 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x13, 0x4e, 0x65, 0x77, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x37, 0x0a, - 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, - 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x42, 0x09, - 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 
0x12, 0x28, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x22, 0x84, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, - 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, - 0x47, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x2a, 0x41, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, - 0x63, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x11, - 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x10, - 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x09, 0x43, - 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1b, - 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, - 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x62, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x85, 0x02, + 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x26, 0x0a, + 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 
0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x37, 0x0a, 0x0a, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x50, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x84, 0x01, + 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x43, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, 0x47, 0x0a, 0x10, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x2a, 0x41, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x0b, 0x0a, + 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x10, 0x01, 0x12, 0x18, 0x0a, + 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x09, 0x43, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, + 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1b, 0x2e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x2f, 0x5a, + 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 
0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/pbsubscribe/subscribe.proto b/proto/pbsubscribe/subscribe.proto index a860b874b..be98a6f7d 100644 --- a/proto/pbsubscribe/subscribe.proto +++ b/proto/pbsubscribe/subscribe.proto @@ -84,6 +84,9 @@ message SubscribeRequest { // // Partition is an enterprise-only feature. string Partition = 7; + + // TODO(peering): docs + string PeerName = 8; } // Event describes a streaming update on a subscription. Events are used both to diff --git a/testrpc/wait.go b/testrpc/wait.go index 230f2f38a..d6b72749e 100644 --- a/testrpc/wait.go +++ b/testrpc/wait.go @@ -12,6 +12,9 @@ import ( type rpcFn func(string, interface{}, interface{}) error // WaitForLeader ensures we have a leader and a node registration. +// +// Most uses of this would be better served in the agent/consul package by +// using waitForLeaderEstablishment() instead. func WaitForLeader(t *testing.T, rpc rpcFn, dc string, options ...waitOption) { t.Helper()
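A typical call site, sketched under the assumption of the usual test-agent setup: agent.NewTestAgent and a.RPC are assumed context from the agent package rather than part of this patch, and a.RPC satisfies the rpcFn signature above.

package testrpc_example // hypothetical illustration only

import (
	"testing"

	"github.com/hashicorp/consul/agent"
	"github.com/hashicorp/consul/testrpc"
)

func TestPeeringEndpointExample(t *testing.T) {
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()
	// Block until dc1 has elected a leader and registered the node.
	testrpc.WaitForLeader(t, a.RPC, "dc1")
}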