From f6958894f503fb88eeb834111903b673a5425963 Mon Sep 17 00:00:00 2001 From: Krastin Krastev Date: Tue, 22 Mar 2022 20:36:59 +0100 Subject: [PATCH 001/107] docs: fix a trailing comma in JSON body removing a comma after a last element in JSON body --- website/content/docs/connect/registration/sidecar-service.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/registration/sidecar-service.mdx b/website/content/docs/connect/registration/sidecar-service.mdx index c08bd1791..a795d61e1 100644 --- a/website/content/docs/connect/registration/sidecar-service.mdx +++ b/website/content/docs/connect/registration/sidecar-service.mdx @@ -50,7 +50,7 @@ definitions: { "service": { "name": "web", - "port": 8080, + "port": 8080 } } { From 40402339e803864e8a2bf01343ddf3ee9c168a9d Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Wed, 8 Jun 2022 17:50:56 -0400 Subject: [PATCH 002/107] docs: correct Vault CA multiple namespace support --- website/content/docs/connect/ca/vault.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx index 02294c2c2..7600d40c7 100644 --- a/website/content/docs/connect/ca/vault.mdx +++ b/website/content/docs/connect/ca/vault.mdx @@ -137,7 +137,7 @@ The configuration options are listed below. that authorized it. - `RootPKINamespace` / `root_pki_namespace` (`string: `) - The absolute namespace - that the `RootPKIPath` is in. Setting this overrides the `Namespace` option for the `RootPKIPath`. Introduced in 1.12.1 + that the `RootPKIPath` is in. Setting this overrides the `Namespace` option for the `RootPKIPath`. Introduced in 1.12.3. - `IntermediatePKIPath` / `intermediate_pki_path` (`string: `) - The path to a PKI secrets engine for the generated intermediate certificate. @@ -149,7 +149,7 @@ The configuration options are listed below. datacenter must specify a unique `intermediate_pki_path`. - `IntermediatePKINamespace` / `intermediate_pki_namespace` (`string: `) - The absolute namespace - that the `IntermediatePKIPath` is in. Setting this overrides the `Namespace` option for the `IntermediatePKIPath`. Introduced in 1.12.1 + that the `IntermediatePKIPath` is in. Setting this overrides the `Namespace` option for the `IntermediatePKIPath`. Introduced in 1.12.3. - `CAFile` / `ca_file` (`string: ""`) - Specifies an optional path to the CA certificate used for Vault communication. If unspecified, this will fallback From 9c0f2478b93d5f1f754c35eae3dd4d0a15d38cce Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 1 Jul 2022 06:24:53 -0700 Subject: [PATCH 003/107] docs: add Envoy upgrade step to std upgrade docs --- website/content/docs/upgrading/index.mdx | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/website/content/docs/upgrading/index.mdx b/website/content/docs/upgrading/index.mdx index 08a79a8a0..e9a2a892c 100644 --- a/website/content/docs/upgrading/index.mdx +++ b/website/content/docs/upgrading/index.mdx @@ -34,15 +34,23 @@ Consul is A, and version B is released. there are no compatibility issues that will affect your workload. If there are plan accordingly before continuing. -2. On each server, install version B of Consul. +2. On each Consul server agent, install version B of Consul. -3. One server at a time, shut down version A via `consul leave` and restart with version B. 
Wait until - the server is healthy and has rejoined the cluster before moving on to the - next server. +3. One Consul server agent at a time, shut down version A via `consul leave` and restart with version B. Wait until + the server agent is healthy and has rejoined the cluster before moving on to the + next server agent. -4. Once all the servers are upgraded, begin a rollout of clients following +4. Once all the server agents are upgraded, begin a rollout of client agents following the same process. + -> **Upgrade Envoy proxies:** If a client agent has associated Envoy proxies (e.g., sidecars, gateways), + install a [compatible Envoy version](/docs/connect/proxies/envoy#supported-versions) + for Consul version B. + After stopping client agent version A, + stop its associated Envoy proxies. + After restarting the client agent with version B, + restart its associated Envoy proxies with the compatible Envoy version. + 5. Done! You are now running the latest Consul agent. You can verify this by running `consul members` to make sure all members have the latest build and highest protocol version. From 599f5e2207691e987cd07e6ecef159edc96b7b29 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 Jul 2022 11:34:14 +0100 Subject: [PATCH 004/107] proxycfg-glue: server-local compiled discovery chain data source This is the OSS portion of enterprise PR 2236. Adds a local blocking query-based implementation of the proxycfg.CompiledDiscoveryChain interface. --- agent/agent.go | 2 + agent/proxycfg-glue/config_entry.go | 1 + agent/proxycfg-glue/discovery_chain.go | 95 ++++++++++++ agent/proxycfg-glue/discovery_chain_test.go | 114 +++++++++++++++ agent/proxycfg-glue/glue.go | 9 +- agent/proxycfg-glue/helpers_test.go | 34 +++++ .../proxycfg-glue/intention_upstreams_test.go | 25 +--- agent/proxycfg-glue/intentions_ent_test.go | 49 +++---- agent/proxycfg-glue/intentions_test.go | 137 +++++++++--------- 9 files changed, 345 insertions(+), 121 deletions(-) create mode 100644 agent/proxycfg-glue/discovery_chain.go create mode 100644 agent/proxycfg-glue/discovery_chain_test.go create mode 100644 agent/proxycfg-glue/helpers_test.go diff --git a/agent/agent.go b/agent/agent.go index 765c1ab91..c1381d0d2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4237,6 +4237,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { if server, ok := a.delegate.(*consul.Server); ok { deps := proxycfgglue.ServerDataSourceDeps{ + Datacenter: a.config.Datacenter, EventPublisher: a.baseDeps.EventPublisher, ViewStore: a.baseDeps.ViewStore, Logger: a.logger.Named("proxycfg.server-data-sources"), @@ -4245,6 +4246,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { } sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps) sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps) + sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache)) sources.Intentions = proxycfgglue.ServerIntentions(deps) sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) } diff --git a/agent/proxycfg-glue/config_entry.go b/agent/proxycfg-glue/config_entry.go index 8f85d5e13..138909369 100644 --- a/agent/proxycfg-glue/config_entry.go +++ b/agent/proxycfg-glue/config_entry.go @@ -19,6 +19,7 @@ import ( // ServerDataSourceDeps contains the dependencies needed for sourcing data from // server-local sources (e.g. materialized views). 
type ServerDataSourceDeps struct { + Datacenter string ViewStore *submatview.Store EventPublisher *stream.EventPublisher Logger hclog.Logger diff --git a/agent/proxycfg-glue/discovery_chain.go b/agent/proxycfg-glue/discovery_chain.go new file mode 100644 index 000000000..78e1f1653 --- /dev/null +++ b/agent/proxycfg-glue/discovery_chain.go @@ -0,0 +1,95 @@ +package proxycfgglue + +import ( + "context" + + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/consul/discoverychain" + "github.com/hashicorp/consul/agent/consul/watch" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" +) + +// CacheCompiledDiscoveryChain satisfies the proxycfg.CompiledDiscoveryChain +// interface by sourcing data from the agent cache. +func CacheCompiledDiscoveryChain(c *cache.Cache) proxycfg.CompiledDiscoveryChain { + return &cacheProxyDataSource[*structs.DiscoveryChainRequest]{c, cachetype.CompiledDiscoveryChainName} +} + +// ServerCompiledDiscoveryChain satisfies the proxycfg.CompiledDiscoveryChain +// interface by sourcing data from a blocking query against the server's state +// store. +// +// Requests for services in remote datacenters will be delegated to the given +// remoteSource (i.e. CacheCompiledDiscoveryChain). +func ServerCompiledDiscoveryChain(deps ServerDataSourceDeps, remoteSource proxycfg.CompiledDiscoveryChain) proxycfg.CompiledDiscoveryChain { + return &serverCompiledDiscoveryChain{deps, remoteSource} +} + +type serverCompiledDiscoveryChain struct { + deps ServerDataSourceDeps + remoteSource proxycfg.CompiledDiscoveryChain +} + +func (s serverCompiledDiscoveryChain) Notify(ctx context.Context, req *structs.DiscoveryChainRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + if req.Datacenter != s.deps.Datacenter { + return s.remoteSource.Notify(ctx, req, correlationID, ch) + } + + entMeta := req.GetEnterpriseMeta() + + evalDC := req.EvaluateInDatacenter + if evalDC == "" { + evalDC = s.deps.Datacenter + } + + compileReq := discoverychain.CompileRequest{ + ServiceName: req.Name, + EvaluateInNamespace: entMeta.NamespaceOrDefault(), + EvaluateInPartition: entMeta.PartitionOrDefault(), + EvaluateInDatacenter: evalDC, + OverrideMeshGateway: req.OverrideMeshGateway, + OverrideProtocol: req.OverrideProtocol, + OverrideConnectTimeout: req.OverrideConnectTimeout, + } + + return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *structs.DiscoveryChainResponse, error) { + var authzContext acl.AuthorizerContext + authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, req.GetEnterpriseMeta(), &authzContext) + if err != nil { + return 0, nil, err + } + if err := authz.ToAllowAuthorizer().ServiceReadAllowed(req.Name, &authzContext); err != nil { + // TODO(agentless): the agent cache handles acl.IsErrNotFound specially to + // prevent endlessly retrying if an ACL token is deleted. We should probably + // do this in watch.ServerLocalNotify too. 
+ return 0, nil, err + } + + index, chain, entries, err := store.ServiceDiscoveryChain(ws, req.Name, entMeta, compileReq) + if err != nil { + return 0, nil, err + } + + rsp := &structs.DiscoveryChainResponse{ + Chain: chain, + QueryMeta: structs.QueryMeta{ + Backend: structs.QueryBackendBlocking, + Index: index, + }, + } + + // TODO(boxofrad): Check with @mkeeler that this is the correct thing to do. + if entries.IsEmpty() { + return index, rsp, watch.ErrorNotFound + } + return index, rsp, nil + }, + dispatchBlockingQueryUpdate[*structs.DiscoveryChainResponse](ch), + ) +} diff --git a/agent/proxycfg-glue/discovery_chain_test.go b/agent/proxycfg-glue/discovery_chain_test.go new file mode 100644 index 000000000..7207ffaf1 --- /dev/null +++ b/agent/proxycfg-glue/discovery_chain_test.go @@ -0,0 +1,114 @@ +package proxycfgglue + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" +) + +func TestServerCompiledDiscoveryChain(t *testing.T) { + t.Run("remote queries are delegated to the remote source", func(t *testing.T) { + var ( + ctx = context.Background() + req = &structs.DiscoveryChainRequest{Datacenter: "dc2"} + correlationID = "correlation-id" + ch = make(chan<- proxycfg.UpdateEvent) + result = errors.New("KABOOM") + ) + + remoteSource := newMockCompiledDiscoveryChain(t) + remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result) + + dataSource := ServerCompiledDiscoveryChain(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource) + err := dataSource.Notify(ctx, req, correlationID, ch) + require.Equal(t, result, err) + }) + + t.Run("local queries are served from the state store", func(t *testing.T) { + const ( + serviceName = "web" + datacenter = "dc1" + index = 123 + ) + + store := state.NewStateStore(nil) + require.NoError(t, store.CASetConfig(index, &structs.CAConfiguration{ClusterID: "cluster-id"})) + require.NoError(t, store.EnsureConfigEntry(index, &structs.ServiceConfigEntry{ + Name: serviceName, + Kind: structs.ServiceDefaults, + })) + + req := &structs.DiscoveryChainRequest{ + Name: serviceName, + Datacenter: datacenter, + } + + resolver := newStaticResolver( + policyAuthorizer(t, fmt.Sprintf(`service "%s" { policy = "read" }`, serviceName)), + ) + + dataSource := ServerCompiledDiscoveryChain(ServerDataSourceDeps{ + ACLResolver: resolver, + Datacenter: datacenter, + GetStore: func() Store { return store }, + }, nil) + + eventCh := make(chan proxycfg.UpdateEvent) + err := dataSource.Notify(context.Background(), req, "", eventCh) + require.NoError(t, err) + + // Check we get an event with the initial state. + result := getEventResult[*structs.DiscoveryChainResponse](t, eventCh) + require.NotNil(t, result.Chain) + + // Change the protocol to HTTP and check we get a recompiled chain. + require.NoError(t, store.EnsureConfigEntry(index+1, &structs.ServiceConfigEntry{ + Name: serviceName, + Kind: structs.ServiceDefaults, + Protocol: "http", + })) + + result = getEventResult[*structs.DiscoveryChainResponse](t, eventCh) + require.NotNil(t, result.Chain) + require.Equal(t, "http", result.Chain.Protocol) + + // Revoke access to the service. + resolver.SwapAuthorizer(acl.DenyAll()) + + // Write another config entry. 
+ require.NoError(t, store.EnsureConfigEntry(index+2, &structs.ServiceConfigEntry{ + Name: serviceName, + Kind: structs.ServiceDefaults, + MaxInboundConnections: 1, + })) + + // Should no longer receive events for this service. + expectNoEvent(t, eventCh) + }) +} + +func newMockCompiledDiscoveryChain(t *testing.T) *mockCompiledDiscoveryChain { + mock := &mockCompiledDiscoveryChain{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +type mockCompiledDiscoveryChain struct { + mock.Mock +} + +func (m *mockCompiledDiscoveryChain) Notify(ctx context.Context, req *structs.DiscoveryChainRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + return m.Called(ctx, req, correlationID, ch).Error(0) +} diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 0fb0b7752..f06df2827 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -8,6 +8,8 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/configentry" + "github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/consul/watch" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/rpcclient/health" @@ -20,6 +22,7 @@ type Store interface { watch.StateStore IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) + ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) } // CacheCARoots satisfies the proxycfg.CARoots interface by sourcing data from @@ -28,12 +31,6 @@ func CacheCARoots(c *cache.Cache) proxycfg.CARoots { return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.ConnectCARootName} } -// CacheCompiledDiscoveryChain satisfies the proxycfg.CompiledDiscoveryChain -// interface by sourcing data from the agent cache. -func CacheCompiledDiscoveryChain(c *cache.Cache) proxycfg.CompiledDiscoveryChain { - return &cacheProxyDataSource[*structs.DiscoveryChainRequest]{c, cachetype.CompiledDiscoveryChainName} -} - // CacheConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing // data from the agent cache. 
func CacheConfigEntry(c *cache.Cache) proxycfg.ConfigEntry { diff --git a/agent/proxycfg-glue/helpers_test.go b/agent/proxycfg-glue/helpers_test.go new file mode 100644 index 000000000..5528ed4a5 --- /dev/null +++ b/agent/proxycfg-glue/helpers_test.go @@ -0,0 +1,34 @@ +package proxycfgglue + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/proxycfg" +) + +func getEventResult[ResultType any](t *testing.T, eventCh <-chan proxycfg.UpdateEvent) ResultType { + t.Helper() + + select { + case event := <-eventCh: + require.NoError(t, event.Err, "event should not have an error") + result, ok := event.Result.(ResultType) + require.Truef(t, ok, "unexpected result type: %T", event.Result) + return result + case <-time.After(100 * time.Millisecond): + t.Fatal("timeout waiting for event") + } + + panic("this should never be reached") +} + +func expectNoEvent(t *testing.T, eventCh <-chan proxycfg.UpdateEvent) { + select { + case <-eventCh: + t.Fatal("expected no event") + case <-time.After(100 * time.Millisecond): + } +} diff --git a/agent/proxycfg-glue/intention_upstreams_test.go b/agent/proxycfg-glue/intention_upstreams_test.go index 22d596109..22846f24d 100644 --- a/agent/proxycfg-glue/intention_upstreams_test.go +++ b/agent/proxycfg-glue/intention_upstreams_test.go @@ -3,7 +3,6 @@ package proxycfgglue import ( "context" "testing" - "time" "github.com/stretchr/testify/require" @@ -62,7 +61,7 @@ func TestServerIntentionUpstreams(t *testing.T) { authz := policyAuthorizer(t, `service "db" { policy = "read" }`) dataSource := ServerIntentionUpstreams(ServerDataSourceDeps{ - ACLResolver: staticResolver{authz}, + ACLResolver: newStaticResolver(authz), GetStore: func() Store { return store }, }) @@ -70,28 +69,16 @@ func TestServerIntentionUpstreams(t *testing.T) { err := dataSource.Notify(ctx, &structs.ServiceSpecificRequest{ServiceName: serviceName}, "", ch) require.NoError(t, err) - select { - case event := <-ch: - result, ok := event.Result.(*structs.IndexedServiceList) - require.Truef(t, ok, "expected IndexedServiceList, got: %T", event.Result) - require.Len(t, result.Services, 0) - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + result := getEventResult[*structs.IndexedServiceList](t, ch) + require.Len(t, result.Services, 0) // Create an allow intention for the db service. This should *not* be filtered // out because the ACL token *does* have read access on it. 
createIntention("db") - select { - case event := <-ch: - result, ok := event.Result.(*structs.IndexedServiceList) - require.Truef(t, ok, "expected IndexedServiceList, got: %T", event.Result) - require.Len(t, result.Services, 1) - require.Equal(t, "db", result.Services[0].Name) - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + result = getEventResult[*structs.IndexedServiceList](t, ch) + require.Len(t, result.Services, 1) + require.Equal(t, "db", result.Services[0].Name) } func disableLegacyIntentions(t *testing.T, store *state.Store) { diff --git a/agent/proxycfg-glue/intentions_ent_test.go b/agent/proxycfg-glue/intentions_ent_test.go index 66f3d62cb..00eb37285 100644 --- a/agent/proxycfg-glue/intentions_ent_test.go +++ b/agent/proxycfg-glue/intentions_ent_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/submatview" "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/sdk/testutil" ) func TestServerIntentions_Enterprise(t *testing.T) { @@ -39,7 +40,7 @@ func TestServerIntentions_Enterprise(t *testing.T) { go publisher.Run(ctx) intentions := ServerIntentions(ServerDataSourceDeps{ - ACLResolver: staticResolver{acl.ManageAll()}, + ACLResolver: newStaticResolver(acl.ManageAll()), ViewStore: store, EventPublisher: publisher, Logger: logger, @@ -51,37 +52,29 @@ func TestServerIntentions_Enterprise(t *testing.T) { ServiceName: serviceName, }, "", eventCh)) - // Wait for the initial snapshots. - select { - case <-eventCh: - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + testutil.RunStep(t, "initial snapshot", func(t *testing.T) { + getEventResult[structs.Intentions](t, eventCh) + }) - // Publish a namespace wildcard intention. 
- publisher.Publish([]stream.Event{ - { - Topic: pbsubscribe.Topic_ServiceIntentions, - Index: index + 1, - Payload: state.EventPayloadConfigEntry{ - Op: pbsubscribe.ConfigEntryUpdate_Upsert, - Value: &structs.ServiceIntentionsConfigEntry{ - Name: structs.WildcardSpecifier, - EnterpriseMeta: *acl.WildcardEnterpriseMeta(), - Sources: []*structs.SourceIntention{ - {Name: structs.WildcardSpecifier, Action: structs.IntentionActionAllow, Precedence: 1}, + testutil.RunStep(t, "publish a namespace-wildcard partition", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Topic: pbsubscribe.Topic_ServiceIntentions, + Index: index + 1, + Payload: state.EventPayloadConfigEntry{ + Op: pbsubscribe.ConfigEntryUpdate_Upsert, + Value: &structs.ServiceIntentionsConfigEntry{ + Name: structs.WildcardSpecifier, + EnterpriseMeta: *acl.WildcardEnterpriseMeta(), + Sources: []*structs.SourceIntention{ + {Name: structs.WildcardSpecifier, Action: structs.IntentionActionAllow, Precedence: 1}, + }, }, }, }, - }, - }) + }) - select { - case event := <-eventCh: - result, ok := event.Result.(structs.Intentions) - require.Truef(t, ok, "expected Intentions, got: %T", event.Result) + result := getEventResult[structs.Intentions](t, eventCh) require.Len(t, result, 1) - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + }) } diff --git a/agent/proxycfg-glue/intentions_test.go b/agent/proxycfg-glue/intentions_test.go index 0284068bd..3597109f7 100644 --- a/agent/proxycfg-glue/intentions_test.go +++ b/agent/proxycfg-glue/intentions_test.go @@ -2,6 +2,7 @@ package proxycfgglue import ( "context" + "sync" "testing" "time" @@ -16,6 +17,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/submatview" "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/sdk/testutil" ) func TestServerIntentions(t *testing.T) { @@ -39,7 +41,7 @@ func TestServerIntentions(t *testing.T) { go publisher.Run(ctx) intentions := ServerIntentions(ServerDataSourceDeps{ - ACLResolver: staticResolver{acl.ManageAll()}, + ACLResolver: newStaticResolver(acl.ManageAll()), ViewStore: store, EventPublisher: publisher, Logger: logger, @@ -51,64 +53,53 @@ func TestServerIntentions(t *testing.T) { EnterpriseMeta: *acl.DefaultEnterpriseMeta(), }, "", eventCh)) - // Wait for the initial snapshots. - select { - case <-eventCh: - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + testutil.RunStep(t, "initial snapshot", func(t *testing.T) { + getEventResult[structs.Intentions](t, eventCh) + }) - // Publish an explicit intention on the service. 
- publisher.Publish([]stream.Event{ - { - Topic: pbsubscribe.Topic_ServiceIntentions, - Index: index + 1, - Payload: state.EventPayloadConfigEntry{ - Op: pbsubscribe.ConfigEntryUpdate_Upsert, - Value: &structs.ServiceIntentionsConfigEntry{ - Name: serviceName, - Sources: []*structs.SourceIntention{ - {Name: "db", Action: structs.IntentionActionAllow, Precedence: 1}, + testutil.RunStep(t, "publishing an explicit intention", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Topic: pbsubscribe.Topic_ServiceIntentions, + Index: index + 1, + Payload: state.EventPayloadConfigEntry{ + Op: pbsubscribe.ConfigEntryUpdate_Upsert, + Value: &structs.ServiceIntentionsConfigEntry{ + Name: serviceName, + Sources: []*structs.SourceIntention{ + {Name: "db", Action: structs.IntentionActionAllow, Precedence: 1}, + }, }, }, }, - }, - }) + }) - select { - case event := <-eventCh: - result, ok := event.Result.(structs.Intentions) - require.Truef(t, ok, "expected Intentions, got: %T", event.Result) + result := getEventResult[structs.Intentions](t, eventCh) require.Len(t, result, 1) intention := result[0] require.Equal(t, intention.DestinationName, serviceName) require.Equal(t, intention.SourceName, "db") - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + }) - // Publish a wildcard intention. - publisher.Publish([]stream.Event{ - { - Topic: pbsubscribe.Topic_ServiceIntentions, - Index: index + 2, - Payload: state.EventPayloadConfigEntry{ - Op: pbsubscribe.ConfigEntryUpdate_Upsert, - Value: &structs.ServiceIntentionsConfigEntry{ - Name: structs.WildcardSpecifier, - Sources: []*structs.SourceIntention{ - {Name: structs.WildcardSpecifier, Action: structs.IntentionActionAllow, Precedence: 0}, + testutil.RunStep(t, "publishing a wildcard intention", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Topic: pbsubscribe.Topic_ServiceIntentions, + Index: index + 2, + Payload: state.EventPayloadConfigEntry{ + Op: pbsubscribe.ConfigEntryUpdate_Upsert, + Value: &structs.ServiceIntentionsConfigEntry{ + Name: structs.WildcardSpecifier, + Sources: []*structs.SourceIntention{ + {Name: structs.WildcardSpecifier, Action: structs.IntentionActionAllow, Precedence: 0}, + }, }, }, }, - }, - }) + }) - select { - case event := <-eventCh: - result, ok := event.Result.(structs.Intentions) - require.Truef(t, ok, "expected Intentions, got: %T", event.Result) + result := getEventResult[structs.Intentions](t, eventCh) require.Len(t, result, 2) a := result[0] @@ -118,38 +109,48 @@ func TestServerIntentions(t *testing.T) { b := result[1] require.Equal(t, b.DestinationName, structs.WildcardSpecifier) require.Equal(t, b.SourceName, structs.WildcardSpecifier) - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } - - // Publish a delete event and observe the intention is removed from the results. 
- publisher.Publish([]stream.Event{ - { - Topic: pbsubscribe.Topic_ServiceIntentions, - Index: index + 3, - Payload: state.EventPayloadConfigEntry{ - Op: pbsubscribe.ConfigEntryUpdate_Delete, - Value: &structs.ServiceIntentionsConfigEntry{ - Name: serviceName, - }, - }, - }, }) - select { - case event := <-eventCh: - result, ok := event.Result.(structs.Intentions) - require.Truef(t, ok, "expected Intentions, got: %T", event.Result) + testutil.RunStep(t, "publishing a delete event", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Topic: pbsubscribe.Topic_ServiceIntentions, + Index: index + 3, + Payload: state.EventPayloadConfigEntry{ + Op: pbsubscribe.ConfigEntryUpdate_Delete, + Value: &structs.ServiceIntentionsConfigEntry{ + Name: serviceName, + }, + }, + }, + }) + + result := getEventResult[structs.Intentions](t, eventCh) require.Len(t, result, 1) - case <-time.After(100 * time.Millisecond): - t.Fatal("timeout waiting for event") - } + }) + } type staticResolver struct { + mu sync.Mutex authorizer acl.Authorizer } -func (r staticResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (resolver.Result, error) { +func newStaticResolver(authz acl.Authorizer) *staticResolver { + resolver := new(staticResolver) + resolver.SwapAuthorizer(authz) + return resolver +} + +func (r *staticResolver) SwapAuthorizer(authz acl.Authorizer) { + r.mu.Lock() + defer r.mu.Unlock() + + r.authorizer = authz +} + +func (r *staticResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (resolver.Result, error) { + r.mu.Lock() + defer r.mu.Unlock() return resolver.Result{Authorizer: r.authorizer}, nil } From 688dfe31381c2ea74cb86a7811061db2a55a461e Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 Jul 2022 11:35:52 +0100 Subject: [PATCH 005/107] proxycfg-glue: server-local implementation of `ServiceList` This is the OSS portion of enterprise PR 2242. This PR introduces a server-local implementation of the proxycfg.ServiceList interface, backed by streaming events and a local materializer. 
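For illustration, a minimal sketch of how a consumer of this data source
could look. This snippet is not part of the change: the package name, the
watchServiceList helper, the "dc1" datacenter value, and the error handling
are assumptions made for the example; only the Notify signature,
proxycfg.UpdateEvent, and the *structs.IndexedServiceList result type come
from the code below.

    package proxycfgexample // hypothetical package, used only for this sketch

    import (
        "context"
        "fmt"

        "github.com/hashicorp/consul/agent/proxycfg"
        "github.com/hashicorp/consul/agent/structs"
    )

    // watchServiceList blocks on a proxycfg.ServiceList data source and prints
    // the set of services each time the materialized list changes.
    func watchServiceList(ctx context.Context, src proxycfg.ServiceList) error {
        ch := make(chan proxycfg.UpdateEvent)
        req := &structs.DCSpecificRequest{Datacenter: "dc1"} // assumed local datacenter
        if err := src.Notify(ctx, req, "service-list", ch); err != nil {
            return err
        }
        for {
            select {
            case <-ctx.Done():
                return nil
            case event := <-ch:
                if event.Err != nil {
                    continue // assumption: callers decide how to surface errors
                }
                list, ok := event.Result.(*structs.IndexedServiceList)
                if !ok {
                    continue
                }
                for _, svc := range list.Services {
                    fmt.Println(svc.String()) // svc is a structs.ServiceName
                }
            }
        }
    }
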
--- agent/agent.go | 1 + agent/consul/fsm/fsm.go | 7 + agent/consul/state/catalog_events.go | 93 ++++++ agent/consul/state/catalog_events_test.go | 112 +++++++ agent/consul/state/events.go | 6 + agent/consul/state/memdb.go | 2 + agent/proxycfg-glue/config_entry.go | 23 +- agent/proxycfg-glue/glue.go | 6 - agent/proxycfg-glue/service_list.go | 124 +++++++ agent/proxycfg-glue/service_list_test.go | 140 ++++++++ proto/pbsubscribe/subscribe.pb.binary.go | 10 + proto/pbsubscribe/subscribe.pb.go | 380 +++++++++++++++------- proto/pbsubscribe/subscribe.proto | 19 ++ 13 files changed, 784 insertions(+), 139 deletions(-) create mode 100644 agent/proxycfg-glue/service_list.go create mode 100644 agent/proxycfg-glue/service_list_test.go diff --git a/agent/agent.go b/agent/agent.go index c1381d0d2..7aa42e3d9 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4249,6 +4249,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache)) sources.Intentions = proxycfgglue.ServerIntentions(deps) sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) + sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache)) } a.fillEnterpriseProxyDataSources(&sources) diff --git a/agent/consul/fsm/fsm.go b/agent/consul/fsm/fsm.go index 8fa617b45..432e64631 100644 --- a/agent/consul/fsm/fsm.go +++ b/agent/consul/fsm/fsm.go @@ -324,4 +324,11 @@ func (c *FSM) registerStreamSnapshotHandlers() { if err != nil { panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err)) } + + err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceList, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + return c.State().ServiceListSnapshot(req, buf) + }, true) + if err != nil { + panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err)) + } } diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index b4b498b98..06d6414af 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" ) @@ -71,6 +72,39 @@ func (e EventPayloadCheckServiceNode) ToSubscriptionEvent(idx uint64) *pbsubscri } } +// EventPayloadServiceListUpdate is used as the Payload for a stream.Event when +// services (not service instances) are registered/deregistered. These events +// are used to materialize the list of services in a datacenter. 
+type EventPayloadServiceListUpdate struct { + Op pbsubscribe.CatalogOp + + Name string + EnterpriseMeta acl.EnterpriseMeta + PeerName string +} + +func (e *EventPayloadServiceListUpdate) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_Service{ + Service: &pbsubscribe.ServiceListUpdate{ + Op: e.Op, + Name: e.Name, + EnterpriseMeta: pbcommon.NewEnterpriseMetaFromStructs(e.EnterpriseMeta), + PeerName: e.PeerName, + }, + }, + } +} + +func (e *EventPayloadServiceListUpdate) Subject() stream.Subject { return stream.SubjectNone } + +func (e *EventPayloadServiceListUpdate) HasReadPermission(authz acl.Authorizer) bool { + var authzContext acl.AuthorizerContext + e.EnterpriseMeta.FillAuthzContext(&authzContext) + return authz.ServiceRead(e.Name, &authzContext) == acl.Allow +} + // serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot // of stream.Events that describe the current state of a service health query. func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { @@ -156,6 +190,65 @@ type nodeTuple struct { var serviceChangeIndirect = serviceChange{changeType: changeIndirect} +// ServiceListUpdateEventsFromChanges returns events representing changes to +// the list of services from the given set of state store changes. +func ServiceListUpdateEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { + var events []stream.Event + for _, change := range changes.Changes { + if change.Table != tableKindServiceNames { + continue + } + + kindName := changeObject(change).(*KindServiceName) + + // TODO(peering): make this peer-aware. + payload := &EventPayloadServiceListUpdate{ + Name: kindName.Service.Name, + EnterpriseMeta: kindName.Service.EnterpriseMeta, + } + + if change.Deleted() { + payload.Op = pbsubscribe.CatalogOp_Deregister + } else { + payload.Op = pbsubscribe.CatalogOp_Register + } + + events = append(events, stream.Event{ + Topic: EventTopicServiceList, + Index: changes.Index, + Payload: payload, + }) + } + return events, nil +} + +// ServiceListSnapshot is a stream.SnapshotFunc that returns a snapshot of +// all service names. +func (s *Store) ServiceListSnapshot(_ stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + index, names, err := s.ServiceNamesOfKind(nil, "") + if err != nil { + return 0, err + } + + if l := len(names); l > 0 { + events := make([]stream.Event, l) + for idx, name := range names { + events[idx] = stream.Event{ + Topic: EventTopicServiceList, + Index: index, + Payload: &EventPayloadServiceListUpdate{ + Op: pbsubscribe.CatalogOp_Register, + Name: name.Service.Name, + EnterpriseMeta: name.Service.EnterpriseMeta, + }, + } + } + buf.Append(events) + } + + return index, nil +} + // ServiceHealthEventsFromChanges returns all the service and Connect health // events that should be emitted given a set of changes to the state store. 
func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index 129b834b8..ef398ed6c 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -8,6 +8,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -2543,3 +2544,114 @@ func newPayloadCheckServiceNodeWithOverride( overrideNamespace: overrideNamespace, } } + +func TestServiceListUpdateSnapshot(t *testing.T) { + const index uint64 = 123 + + store := testStateStore(t) + require.NoError(t, store.EnsureRegistration(index, testServiceRegistration(t, "db"))) + + buf := &snapshotAppender{} + idx, err := store.ServiceListSnapshot(stream.SubscribeRequest{Subject: stream.SubjectNone}, buf) + require.NoError(t, err) + require.NotZero(t, idx) + + require.Len(t, buf.events, 1) + require.Len(t, buf.events[0], 1) + + payload := buf.events[0][0].Payload.(*EventPayloadServiceListUpdate) + require.Equal(t, pbsubscribe.CatalogOp_Register, payload.Op) + require.Equal(t, "db", payload.Name) +} + +func TestServiceListUpdateEventsFromChanges(t *testing.T) { + const changeIndex = 123 + + testCases := map[string]struct { + setup func(*Store, *txn) error + mutate func(*Store, *txn) error + events []stream.Event + }{ + "register new service": { + mutate: func(store *Store, tx *txn) error { + return store.ensureRegistrationTxn(tx, changeIndex, false, testServiceRegistration(t, "db"), false) + }, + events: []stream.Event{ + { + Topic: EventTopicServiceList, + Index: changeIndex, + Payload: &EventPayloadServiceListUpdate{ + Op: pbsubscribe.CatalogOp_Register, + Name: "db", + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + }, + }, + }, + }, + "service already registered": { + setup: func(store *Store, tx *txn) error { + return store.ensureRegistrationTxn(tx, changeIndex, false, testServiceRegistration(t, "db"), false) + }, + mutate: func(store *Store, tx *txn) error { + return store.ensureRegistrationTxn(tx, changeIndex, false, testServiceRegistration(t, "db"), false) + }, + events: nil, + }, + "deregister last instance of service": { + setup: func(store *Store, tx *txn) error { + return store.ensureRegistrationTxn(tx, changeIndex, false, testServiceRegistration(t, "db"), false) + }, + mutate: func(store *Store, tx *txn) error { + return store.deleteServiceTxn(tx, tx.Index, "node1", "db", nil, "") + }, + events: []stream.Event{ + { + Topic: EventTopicServiceList, + Index: changeIndex, + Payload: &EventPayloadServiceListUpdate{ + Op: pbsubscribe.CatalogOp_Deregister, + Name: "db", + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + }, + }, + }, + }, + "deregister (not the last) instance of service": { + setup: func(store *Store, tx *txn) error { + if err := store.ensureRegistrationTxn(tx, changeIndex, false, testServiceRegistration(t, "db"), false); err != nil { + return err + } + if err := store.ensureRegistrationTxn(tx, changeIndex, false, testServiceRegistration(t, "db", regNode2), false); err != nil { + return err + } + return nil + }, + mutate: func(store *Store, tx *txn) error { + return store.deleteServiceTxn(tx, tx.Index, "node1", "db", nil, "") + }, + events: nil, + }, + } + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + store := testStateStore(t) + + if 
tc.setup != nil { + tx := store.db.WriteTxn(0) + require.NoError(t, tc.setup(store, tx)) + require.NoError(t, tx.Commit()) + } + + tx := store.db.WriteTxn(0) + t.Cleanup(tx.Abort) + + if tc.mutate != nil { + require.NoError(t, tc.mutate(store, tx)) + } + + events, err := ServiceListUpdateEventsFromChanges(tx, Changes{Index: changeIndex, Changes: tx.Changes()}) + require.NoError(t, err) + require.Equal(t, tc.events, events) + }) + } +} diff --git a/agent/consul/state/events.go b/agent/consul/state/events.go index e59624511..2e74c44c9 100644 --- a/agent/consul/state/events.go +++ b/agent/consul/state/events.go @@ -43,6 +43,12 @@ func PBToStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.E Name: named.Key, EnterpriseMeta: &entMeta, } + case EventTopicServiceList: + // Events on this topic are published to SubjectNone, but rather than + // exposing this in (and further complicating) the streaming API we rely + // on consumers passing WildcardSubject instead, which is functionally the + // same for this purpose. + return nil, fmt.Errorf("topic %s can only be consumed using WildcardSubject", EventTopicServiceList) default: return nil, fmt.Errorf("cannot construct subject for topic %s", req.Topic) } diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 95a291061..751622977 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -184,6 +184,7 @@ var ( EventTopicServiceResolver = pbsubscribe.Topic_ServiceResolver EventTopicIngressGateway = pbsubscribe.Topic_IngressGateway EventTopicServiceIntentions = pbsubscribe.Topic_ServiceIntentions + EventTopicServiceList = pbsubscribe.Topic_ServiceList ) func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { @@ -192,6 +193,7 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { aclChangeUnsubscribeEvent, caRootsChangeEvents, ServiceHealthEventsFromChanges, + ServiceListUpdateEventsFromChanges, ConfigEntryEventsFromChanges, // TODO: add other table handlers here. } diff --git a/agent/proxycfg-glue/config_entry.go b/agent/proxycfg-glue/config_entry.go index 138909369..1f6fbf245 100644 --- a/agent/proxycfg-glue/config_entry.go +++ b/agent/proxycfg-glue/config_entry.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbconfigentry" "github.com/hashicorp/consul/proto/pbsubscribe" ) @@ -194,7 +195,7 @@ func (v *configEntryListView) Result(index uint64) any { } func (v *configEntryListView) Update(events []*pbsubscribe.Event) error { - for _, event := range v.filterByEnterpriseMeta(events) { + for _, event := range filterByEnterpriseMeta(events, v.entMeta) { update := event.GetConfigEntry() configEntry := pbconfigentry.ConfigEntryToStructs(update.ConfigEntry) name := structs.NewServiceName(configEntry.GetName(), configEntry.GetEnterpriseMeta()).String() @@ -213,22 +214,26 @@ func (v *configEntryListView) Update(events []*pbsubscribe.Event) error { // don't match the request's enterprise meta - this is necessary because when // subscribing to a topic with SubjectWildcard we'll get events for resources // in all partitions and namespaces. 
-func (v *configEntryListView) filterByEnterpriseMeta(events []*pbsubscribe.Event) []*pbsubscribe.Event { - partition := v.entMeta.PartitionOrDefault() - namespace := v.entMeta.NamespaceOrDefault() +func filterByEnterpriseMeta(events []*pbsubscribe.Event, entMeta acl.EnterpriseMeta) []*pbsubscribe.Event { + partition := entMeta.PartitionOrDefault() + namespace := entMeta.NamespaceOrDefault() filtered := make([]*pbsubscribe.Event, 0, len(events)) for _, event := range events { - configEntry := event.GetConfigEntry().GetConfigEntry() - if configEntry == nil { + var eventEntMeta *pbcommon.EnterpriseMeta + switch payload := event.Payload.(type) { + case *pbsubscribe.Event_ConfigEntry: + eventEntMeta = payload.ConfigEntry.ConfigEntry.GetEnterpriseMeta() + case *pbsubscribe.Event_Service: + eventEntMeta = payload.Service.GetEnterpriseMeta() + default: continue } - entMeta := configEntry.GetEnterpriseMeta() - if partition != acl.WildcardName && !acl.EqualPartitions(partition, entMeta.GetPartition()) { + if partition != acl.WildcardName && !acl.EqualPartitions(partition, eventEntMeta.GetPartition()) { continue } - if namespace != acl.WildcardName && !acl.EqualNamespaces(namespace, entMeta.GetNamespace()) { + if namespace != acl.WildcardName && !acl.EqualNamespaces(namespace, eventEntMeta.GetNamespace()) { continue } diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index f06df2827..739cc7f64 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -103,12 +103,6 @@ func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig { return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName} } -// CacheServiceList satisfies the proxycfg.ServiceList interface by sourcing -// data from the agent cache. -func CacheServiceList(c *cache.Cache) proxycfg.ServiceList { - return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.CatalogServiceListName} -} - // CacheTrustBundle satisfies the proxycfg.TrustBundle interface by sourcing // data from the agent cache. func CacheTrustBundle(c *cache.Cache) proxycfg.TrustBundle { diff --git a/agent/proxycfg-glue/service_list.go b/agent/proxycfg-glue/service_list.go new file mode 100644 index 000000000..14dc13f31 --- /dev/null +++ b/agent/proxycfg-glue/service_list.go @@ -0,0 +1,124 @@ +package proxycfgglue + +import ( + "context" + "sort" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbcommon" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// CacheServiceList satisfies the proxycfg.ServiceList interface by sourcing +// data from the agent cache. 
+func CacheServiceList(c *cache.Cache) proxycfg.ServiceList { + return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.CatalogServiceListName} +} + +func ServerServiceList(deps ServerDataSourceDeps, remoteSource proxycfg.ServiceList) proxycfg.ServiceList { + return &serverServiceList{deps, remoteSource} +} + +type serverServiceList struct { + deps ServerDataSourceDeps + remoteSource proxycfg.ServiceList +} + +func (s *serverServiceList) Notify(ctx context.Context, req *structs.DCSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + if req.Datacenter != s.deps.Datacenter { + return s.remoteSource.Notify(ctx, req, correlationID, ch) + } + return s.deps.ViewStore.NotifyCallback( + ctx, + &serviceListRequest{s.deps, req}, + correlationID, + dispatchCacheUpdate(ch), + ) +} + +type serviceListRequest struct { + deps ServerDataSourceDeps + req *structs.DCSpecificRequest +} + +func (r *serviceListRequest) Request(index uint64) *pbsubscribe.SubscribeRequest { + return &pbsubscribe.SubscribeRequest{ + Topic: pbsubscribe.Topic_ServiceList, + Subject: &pbsubscribe.SubscribeRequest_WildcardSubject{WildcardSubject: true}, + Index: index, + Datacenter: r.req.Datacenter, + Token: r.req.QueryOptions.Token, + } +} + +func (r *serviceListRequest) CacheInfo() cache.RequestInfo { return r.req.CacheInfo() } + +func (r *serviceListRequest) NewMaterializer() (submatview.Materializer, error) { + return submatview.NewLocalMaterializer(submatview.LocalMaterializerDeps{ + Backend: r.deps.EventPublisher, + ACLResolver: r.deps.ACLResolver, + Deps: submatview.Deps{ + View: newServiceListView(r.req.EnterpriseMeta), + Logger: r.deps.Logger, + Request: r.Request, + }, + }), nil +} + +func (serviceListRequest) Type() string { return "proxycfgglue.ServiceList" } + +func newServiceListView(entMeta acl.EnterpriseMeta) *serviceListView { + view := &serviceListView{entMeta: entMeta} + view.Reset() + return view +} + +type serviceListView struct { + entMeta acl.EnterpriseMeta + state map[string]structs.ServiceName +} + +func (v *serviceListView) Reset() { v.state = make(map[string]structs.ServiceName) } + +func (v *serviceListView) Update(events []*pbsubscribe.Event) error { + for _, event := range filterByEnterpriseMeta(events, v.entMeta) { + update := event.GetService() + if update == nil { + continue + } + + var entMeta acl.EnterpriseMeta + pbcommon.EnterpriseMetaToStructs(update.EnterpriseMeta, &entMeta) + name := structs.NewServiceName(update.Name, &entMeta) + + switch update.Op { + case pbsubscribe.CatalogOp_Register: + v.state[name.String()] = name + case pbsubscribe.CatalogOp_Deregister: + delete(v.state, name.String()) + } + } + return nil +} + +func (v *serviceListView) Result(index uint64) any { + serviceList := make(structs.ServiceList, 0, len(v.state)) + for _, name := range v.state { + serviceList = append(serviceList, name) + } + sort.Slice(serviceList, func(a, b int) bool { + return serviceList[a].String() < serviceList[b].String() + }) + return &structs.IndexedServiceList{ + Services: serviceList, + QueryMeta: structs.QueryMeta{ + Backend: structs.QueryBackendStreaming, + Index: index, + }, + } +} diff --git a/agent/proxycfg-glue/service_list_test.go b/agent/proxycfg-glue/service_list_test.go new file mode 100644 index 000000000..eedb211b3 --- /dev/null +++ b/agent/proxycfg-glue/service_list_test.go @@ -0,0 +1,140 @@ +package proxycfgglue + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" 
+ + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerServiceList(t *testing.T) { + t.Run("remote queries are delegated to the remote source", func(t *testing.T) { + var ( + ctx = context.Background() + req = &structs.DCSpecificRequest{Datacenter: "dc2"} + correlationID = "correlation-id" + ch = make(chan<- proxycfg.UpdateEvent) + result = errors.New("KABOOM") + ) + + remoteSource := newMockServiceList(t) + remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result) + + dataSource := ServerServiceList(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource) + err := dataSource.Notify(ctx, req, correlationID, ch) + require.Equal(t, result, err) + }) + + t.Run("local queries are served from a materialized view", func(t *testing.T) { + const ( + index uint64 = 123 + datacenter = "dc1" + ) + + logger := testutil.Logger(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := submatview.NewStore(logger) + go store.Run(ctx) + + publisher := stream.NewEventPublisher(10 * time.Second) + publisher.RegisterHandler(pbsubscribe.Topic_ServiceList, + func(stream.SubscribeRequest, stream.SnapshotAppender) (uint64, error) { return index, nil }, + true) + go publisher.Run(ctx) + + dataSource := ServerServiceList(ServerDataSourceDeps{ + Datacenter: datacenter, + ACLResolver: newStaticResolver(acl.ManageAll()), + ViewStore: store, + EventPublisher: publisher, + Logger: logger, + }, nil) + + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(ctx, &structs.DCSpecificRequest{Datacenter: datacenter}, "", eventCh)) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*structs.IndexedServiceList](t, eventCh) + require.Empty(t, result.Services) + }) + + testutil.RunStep(t, "register services", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Index: index + 1, + Topic: pbsubscribe.Topic_ServiceList, + Payload: &state.EventPayloadServiceListUpdate{ + Op: pbsubscribe.CatalogOp_Register, + Name: "web", + }, + }, + { + Index: index + 1, + Topic: pbsubscribe.Topic_ServiceList, + Payload: &state.EventPayloadServiceListUpdate{ + Op: pbsubscribe.CatalogOp_Register, + Name: "db", + }, + }, + }) + + result := getEventResult[*structs.IndexedServiceList](t, eventCh) + require.Len(t, result.Services, 2) + + var names []string + for _, service := range result.Services { + names = append(names, service.Name) + } + require.ElementsMatch(t, names, []string{"web", "db"}) + }) + + testutil.RunStep(t, "deregister service", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Index: index + 2, + Topic: pbsubscribe.Topic_ServiceList, + Payload: &state.EventPayloadServiceListUpdate{ + Op: pbsubscribe.CatalogOp_Deregister, + Name: "web", + }, + }, + }) + + result := getEventResult[*structs.IndexedServiceList](t, eventCh) + require.Len(t, result.Services, 1) + require.Equal(t, "db", result.Services[0].Name) + }) + }) +} + +func newMockServiceList(t *testing.T) *mockServiceList { + mock := &mockServiceList{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +type mockServiceList struct { + mock.Mock +} + +func (m 
*mockServiceList) Notify(ctx context.Context, req *structs.DCSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + return m.Called(ctx, req, correlationID, ch).Error(0) +} diff --git a/proto/pbsubscribe/subscribe.pb.binary.go b/proto/pbsubscribe/subscribe.pb.binary.go index 7e0e74509..43b7c8f59 100644 --- a/proto/pbsubscribe/subscribe.pb.binary.go +++ b/proto/pbsubscribe/subscribe.pb.binary.go @@ -66,3 +66,13 @@ func (msg *ConfigEntryUpdate) MarshalBinary() ([]byte, error) { func (msg *ConfigEntryUpdate) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ServiceListUpdate) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ServiceListUpdate) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbsubscribe/subscribe.pb.go b/proto/pbsubscribe/subscribe.pb.go index deb43bad4..5d209df9a 100644 --- a/proto/pbsubscribe/subscribe.pb.go +++ b/proto/pbsubscribe/subscribe.pb.go @@ -16,6 +16,7 @@ package pbsubscribe import ( + pbcommon "github.com/hashicorp/consul/proto/pbcommon" pbconfigentry "github.com/hashicorp/consul/proto/pbconfigentry" pbservice "github.com/hashicorp/consul/proto/pbservice" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -49,6 +50,12 @@ const ( Topic_IngressGateway Topic = 5 // ServiceIntentions topic contains events for changes to service intentions. Topic_ServiceIntentions Topic = 6 + // ServiceList topic contains events about services (not service instances) + // getting registered/deregistered. It can be used to materialize a list of + // the services in the given datacenter. + // + // Note: WildcardSubject is the only supported Subject on this topic. + Topic_ServiceList Topic = 7 ) // Enum value maps for Topic. @@ -61,6 +68,7 @@ var ( 4: "ServiceResolver", 5: "IngressGateway", 6: "ServiceIntentions", + 7: "ServiceList", } Topic_value = map[string]int32{ "Unknown": 0, @@ -70,6 +78,7 @@ var ( "ServiceResolver": 4, "IngressGateway": 5, "ServiceIntentions": 6, + "ServiceList": 7, } ) @@ -467,6 +476,7 @@ type Event struct { // *Event_EventBatch // *Event_ServiceHealth // *Event_ConfigEntry + // *Event_Service Payload isEvent_Payload `protobuf_oneof:"Payload"` } @@ -551,6 +561,13 @@ func (x *Event) GetConfigEntry() *ConfigEntryUpdate { return nil } +func (x *Event) GetService() *ServiceListUpdate { + if x, ok := x.GetPayload().(*Event_Service); ok { + return x.Service + } + return nil +} + type isEvent_Payload interface { isEvent_Payload() } @@ -589,6 +606,11 @@ type Event_ConfigEntry struct { ConfigEntry *ConfigEntryUpdate `protobuf:"bytes,11,opt,name=ConfigEntry,proto3,oneof"` } +type Event_Service struct { + // Service is used for ServiceList topic. 
+ Service *ServiceListUpdate `protobuf:"bytes,12,opt,name=Service,proto3,oneof"` +} + func (*Event_EndOfSnapshot) isEvent_Payload() {} func (*Event_NewSnapshotToFollow) isEvent_Payload() {} @@ -599,6 +621,8 @@ func (*Event_ServiceHealth) isEvent_Payload() {} func (*Event_ConfigEntry) isEvent_Payload() {} +func (*Event_Service) isEvent_Payload() {} + type EventBatch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -756,121 +780,211 @@ func (x *ConfigEntryUpdate) GetConfigEntry() *pbconfigentry.ConfigEntry { return nil } +type ServiceListUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Op CatalogOp `protobuf:"varint,1,opt,name=Op,proto3,enum=subscribe.CatalogOp" json:"Op,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,3,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` + PeerName string `protobuf:"bytes,4,opt,name=PeerName,proto3" json:"PeerName,omitempty"` +} + +func (x *ServiceListUpdate) Reset() { + *x = ServiceListUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceListUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceListUpdate) ProtoMessage() {} + +func (x *ServiceListUpdate) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbsubscribe_subscribe_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceListUpdate.ProtoReflect.Descriptor instead. 
+func (*ServiceListUpdate) Descriptor() ([]byte, []int) { + return file_proto_pbsubscribe_subscribe_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceListUpdate) GetOp() CatalogOp { + if x != nil { + return x.Op + } + return CatalogOp_Register +} + +func (x *ServiceListUpdate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceListUpdate) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { + if x != nil { + return x.EnterpriseMeta + } + return nil +} + +func (x *ServiceListUpdate) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + var File_proto_pbsubscribe_subscribe_proto protoreflect.FileDescriptor var file_proto_pbsubscribe_subscribe_proto_rawDesc = []byte{ 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x1a, 0x26, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x78, 0x0a, 0x0c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x1a, 0x1b, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x78, 0x0a, 0x0c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, + 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe6, 0x02, 0x0a, 0x10, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, + 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x54, 0x6f, 0x70, 
0x69, 0x63, 0x52, + 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe6, 0x02, 0x0a, - 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x26, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x52, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, - 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, - 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x2a, 0x0a, 0x0f, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0f, 0x57, 0x69, 0x6c, 0x64, - 0x63, 0x61, 0x72, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x53, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0xc7, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x49, 
0x6e, 0x64, 0x65, 0x78, 0x12, 0x26, 0x0a, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, - 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, - 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, - 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x0f, + 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0f, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, + 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x22, 0x81, 0x03, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x12, 0x26, 0x0a, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x45, 0x6e, 0x64, + 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, - 0x77, 0x12, 0x37, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, - 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x48, 0x00, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x12, 0x40, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, - 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x28, 0x0a, - 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x73, 0x75, 0x62, 0x73, 0x63, 
0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, - 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, 0x5f, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0xc4, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x02, - 0x4f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x52, - 0x02, 0x4f, 0x70, 0x12, 0x54, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x22, 0x0a, 0x08, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4f, 0x70, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x01, 0x2a, 0x91, 0x01, - 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, - 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x10, - 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x72, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x10, 0x05, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x10, - 0x06, 0x2a, 0x29, 0x0a, 0x09, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, - 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, - 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 
0x67, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x12, 0x1b, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x92, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x42, 0x0e, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2d, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, - 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, - 0xaa, 0x02, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0xca, 0x02, 0x09, 0x53, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0xe2, 0x02, 0x15, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x37, + 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, + 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, + 0x40, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x38, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x48, 0x00, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 
0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x9c, + 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x43, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, 0x5f, 0x0a, 0x10, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0xc4, 0x01, + 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x02, 0x4f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x25, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, 0x54, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x22, 0x22, 0x0a, 0x08, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x12, 0x0a, 0x0a, 0x06, + 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x10, 0x01, 0x22, 0xc3, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, + 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, + 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, + 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, + 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x2a, 0xa2, 0x01, 0x0a, 0x05, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 
0x77, 0x6e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x02, 0x12, 0x0e, + 0x0a, 0x0a, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x10, 0x05, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x10, 0x06, 0x12, 0x0f, + 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x10, 0x07, 0x2a, + 0x29, 0x0a, 0x09, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, 0x0a, 0x08, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x12, 0x1b, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x92, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x42, 0x0e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, 0xaa, 0x02, + 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0xca, 0x02, 0x09, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0xe2, 0x02, 0x15, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -886,7 +1000,7 @@ func file_proto_pbsubscribe_subscribe_proto_rawDescGZIP() []byte { } var file_proto_pbsubscribe_subscribe_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_proto_pbsubscribe_subscribe_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_proto_pbsubscribe_subscribe_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_proto_pbsubscribe_subscribe_proto_goTypes = []interface{}{ (Topic)(0), // 0: subscribe.Topic (CatalogOp)(0), // 1: subscribe.CatalogOp @@ -897,8 +1011,10 @@ var file_proto_pbsubscribe_subscribe_proto_goTypes = []interface{}{ (*EventBatch)(nil), // 6: subscribe.EventBatch (*ServiceHealthUpdate)(nil), // 7: subscribe.ServiceHealthUpdate (*ConfigEntryUpdate)(nil), // 8: subscribe.ConfigEntryUpdate - (*pbservice.CheckServiceNode)(nil), // 9: hashicorp.consul.internal.service.CheckServiceNode - 
(*pbconfigentry.ConfigEntry)(nil), // 10: hashicorp.consul.internal.configentry.ConfigEntry + (*ServiceListUpdate)(nil), // 9: subscribe.ServiceListUpdate + (*pbservice.CheckServiceNode)(nil), // 10: hashicorp.consul.internal.service.CheckServiceNode + (*pbconfigentry.ConfigEntry)(nil), // 11: hashicorp.consul.internal.configentry.ConfigEntry + (*pbcommon.EnterpriseMeta)(nil), // 12: hashicorp.consul.internal.common.EnterpriseMeta } var file_proto_pbsubscribe_subscribe_proto_depIdxs = []int32{ 0, // 0: subscribe.SubscribeRequest.Topic:type_name -> subscribe.Topic @@ -906,18 +1022,21 @@ var file_proto_pbsubscribe_subscribe_proto_depIdxs = []int32{ 6, // 2: subscribe.Event.EventBatch:type_name -> subscribe.EventBatch 7, // 3: subscribe.Event.ServiceHealth:type_name -> subscribe.ServiceHealthUpdate 8, // 4: subscribe.Event.ConfigEntry:type_name -> subscribe.ConfigEntryUpdate - 5, // 5: subscribe.EventBatch.Events:type_name -> subscribe.Event - 1, // 6: subscribe.ServiceHealthUpdate.Op:type_name -> subscribe.CatalogOp - 9, // 7: subscribe.ServiceHealthUpdate.CheckServiceNode:type_name -> hashicorp.consul.internal.service.CheckServiceNode - 2, // 8: subscribe.ConfigEntryUpdate.Op:type_name -> subscribe.ConfigEntryUpdate.UpdateOp - 10, // 9: subscribe.ConfigEntryUpdate.ConfigEntry:type_name -> hashicorp.consul.internal.configentry.ConfigEntry - 4, // 10: subscribe.StateChangeSubscription.Subscribe:input_type -> subscribe.SubscribeRequest - 5, // 11: subscribe.StateChangeSubscription.Subscribe:output_type -> subscribe.Event - 11, // [11:12] is the sub-list for method output_type - 10, // [10:11] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 9, // 5: subscribe.Event.Service:type_name -> subscribe.ServiceListUpdate + 5, // 6: subscribe.EventBatch.Events:type_name -> subscribe.Event + 1, // 7: subscribe.ServiceHealthUpdate.Op:type_name -> subscribe.CatalogOp + 10, // 8: subscribe.ServiceHealthUpdate.CheckServiceNode:type_name -> hashicorp.consul.internal.service.CheckServiceNode + 2, // 9: subscribe.ConfigEntryUpdate.Op:type_name -> subscribe.ConfigEntryUpdate.UpdateOp + 11, // 10: subscribe.ConfigEntryUpdate.ConfigEntry:type_name -> hashicorp.consul.internal.configentry.ConfigEntry + 1, // 11: subscribe.ServiceListUpdate.Op:type_name -> subscribe.CatalogOp + 12, // 12: subscribe.ServiceListUpdate.EnterpriseMeta:type_name -> hashicorp.consul.internal.common.EnterpriseMeta + 4, // 13: subscribe.StateChangeSubscription.Subscribe:input_type -> subscribe.SubscribeRequest + 5, // 14: subscribe.StateChangeSubscription.Subscribe:output_type -> subscribe.Event + 14, // [14:15] is the sub-list for method output_type + 13, // [13:14] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name } func init() { file_proto_pbsubscribe_subscribe_proto_init() } @@ -998,6 +1117,18 @@ func file_proto_pbsubscribe_subscribe_proto_init() { return nil } } + file_proto_pbsubscribe_subscribe_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceListUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_proto_pbsubscribe_subscribe_proto_msgTypes[1].OneofWrappers = []interface{}{ 
(*SubscribeRequest_WildcardSubject)(nil), @@ -1009,6 +1140,7 @@ func file_proto_pbsubscribe_subscribe_proto_init() { (*Event_EventBatch)(nil), (*Event_ServiceHealth)(nil), (*Event_ConfigEntry)(nil), + (*Event_Service)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -1016,7 +1148,7 @@ func file_proto_pbsubscribe_subscribe_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_pbsubscribe_subscribe_proto_rawDesc, NumEnums: 3, - NumMessages: 6, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/pbsubscribe/subscribe.proto b/proto/pbsubscribe/subscribe.proto index 4f1e092c4..e68a0cecc 100644 --- a/proto/pbsubscribe/subscribe.proto +++ b/proto/pbsubscribe/subscribe.proto @@ -10,6 +10,7 @@ syntax = "proto3"; // compatibility. package subscribe; +import "proto/pbcommon/common.proto"; import "proto/pbconfigentry/config_entry.proto"; import "proto/pbservice/node.proto"; @@ -62,6 +63,13 @@ enum Topic { // ServiceIntentions topic contains events for changes to service intentions. ServiceIntentions = 6; + + // ServiceList topic contains events about services (not service instances) + // getting registered/deregistered. It can be used to materialize a list of + // the services in the given datacenter. + // + // Note: WildcardSubject is the only supported Subject on this topic. + ServiceList = 7; } message NamedSubject { @@ -171,6 +179,9 @@ message Event { // ConfigEntry is used for config entry topics (e.g. MeshConfig). ConfigEntryUpdate ConfigEntry = 11; + + // Service is used for ServiceList topic. + ServiceListUpdate Service = 12; } } @@ -197,3 +208,11 @@ message ConfigEntryUpdate { UpdateOp Op = 1; hashicorp.consul.internal.configentry.ConfigEntry ConfigEntry = 2; } + +message ServiceListUpdate { + CatalogOp Op = 1; + + string Name = 2; + hashicorp.consul.internal.common.EnterpriseMeta EnterpriseMeta = 3; + string PeerName = 4; +} From 70f29942f4d3642f1a6fcf53d367b8bce754bcd7 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 Jul 2022 11:37:48 +0100 Subject: [PATCH 006/107] proxycfg-glue: server-local implementation of the `Health` interface This is the OSS portion of enterprise PR 2249. This PR introduces an implementation of the proxycfg.Health interface based on a local materialized view of the health events. It reuses the view and request machinery from agent/rpcclient/health, which made it super straightforward. 
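For orientation, a minimal sketch of how a consumer drives a proxycfg.Health implementation (the existing ClientHealth or the new ServerHealth): register interest with Notify, then read UpdateEvents from the supplied channel. The package name, request fields, and correlation ID below are illustrative assumptions rather than code from this change; the Notify signature matches the interface implemented in the diff, and the UpdateEvent fields used (CorrelationID, Result, Err) are assumed from how the agent dispatches updates.

// Hypothetical example package, not Consul code: shows the consumption
// pattern for any proxycfg.Health data source.
package healthwatch

import (
	"context"
	"fmt"

	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
)

// watchServiceHealth subscribes to health updates for a service and reports
// the number of nodes in each snapshot it receives.
func watchServiceHealth(ctx context.Context, h proxycfg.Health) error {
	eventCh := make(chan proxycfg.UpdateEvent)

	// Assumption: "dc1" is the local datacenter, so ServerHealth serves the
	// request from its materialized view instead of delegating to ClientHealth.
	req := &structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "web",
	}
	if err := h.Notify(ctx, req, "upstream:web", eventCh); err != nil {
		return err
	}

	for {
		select {
		case <-ctx.Done():
			return nil
		case event := <-eventCh:
			if event.Err != nil {
				return event.Err
			}
			if nodes, ok := event.Result.(*structs.IndexedCheckServiceNodes); ok {
				fmt.Printf("%s: %d nodes\n", event.CorrelationID, len(nodes.Nodes))
			}
		}
	}
}
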
--- agent/agent.go | 3 +- agent/proxycfg-glue/glue.go | 20 ---- agent/proxycfg-glue/health.go | 82 +++++++++++++++ agent/proxycfg-glue/health_test.go | 149 ++++++++++++++++++++++++++++ agent/rpcclient/health/health.go | 4 +- agent/rpcclient/health/view.go | 16 +-- agent/rpcclient/health/view_test.go | 4 +- 7 files changed, 245 insertions(+), 33 deletions(-) create mode 100644 agent/proxycfg-glue/health.go create mode 100644 agent/proxycfg-glue/health_test.go diff --git a/agent/agent.go b/agent/agent.go index 7aa42e3d9..5aa591f14 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4220,7 +4220,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { Datacenters: proxycfgglue.CacheDatacenters(a.cache), FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache), GatewayServices: proxycfgglue.CacheGatewayServices(a.cache), - Health: proxycfgglue.Health(a.rpcClientHealth), + Health: proxycfgglue.ClientHealth(a.rpcClientHealth), HTTPChecks: proxycfgglue.CacheHTTPChecks(a.cache), Intentions: proxycfgglue.CacheIntentions(a.cache), IntentionUpstreams: proxycfgglue.CacheIntentionUpstreams(a.cache), @@ -4247,6 +4247,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps) sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps) sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache)) + sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth)) sources.Intentions = proxycfgglue.ServerIntentions(deps) sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache)) diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 739cc7f64..e7924010a 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/consul/watch" "github.com/hashicorp/consul/agent/proxycfg" - "github.com/hashicorp/consul/agent/rpcclient/health" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbpeering" ) @@ -139,25 +138,6 @@ func (c *cacheProxyDataSource[ReqType]) Notify( return c.c.NotifyCallback(ctx, c.t, req, correlationID, dispatchCacheUpdate(ch)) } -// Health wraps health.Client so that the proxycfg package doesn't need to -// reference cache.UpdateEvent directly. 
-func Health(client *health.Client) proxycfg.Health { - return &healthWrapper{client} -} - -type healthWrapper struct { - client *health.Client -} - -func (h *healthWrapper) Notify( - ctx context.Context, - req *structs.ServiceSpecificRequest, - correlationID string, - ch chan<- proxycfg.UpdateEvent, -) error { - return h.client.Notify(ctx, *req, correlationID, dispatchCacheUpdate(ch)) -} - func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback { return func(ctx context.Context, e cache.UpdateEvent) { u := proxycfg.UpdateEvent{ diff --git a/agent/proxycfg-glue/health.go b/agent/proxycfg-glue/health.go new file mode 100644 index 000000000..331c8012b --- /dev/null +++ b/agent/proxycfg-glue/health.go @@ -0,0 +1,82 @@ +package proxycfgglue + +import ( + "context" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/rpcclient/health" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" +) + +// ClientHealth satisfies the proxycfg.Health interface by sourcing data from +// the given health.Client. +func ClientHealth(client *health.Client) proxycfg.Health { + return &clientHealth{client} +} + +type clientHealth struct { + client *health.Client +} + +func (h *clientHealth) Notify( + ctx context.Context, + req *structs.ServiceSpecificRequest, + correlationID string, + ch chan<- proxycfg.UpdateEvent, +) error { + return h.client.Notify(ctx, *req, correlationID, dispatchCacheUpdate(ch)) +} + +// ServerHealth satisfies the proxycfg.Health interface by sourcing data from +// a local materialized view (backed by an EventPublisher subscription). +// +// Requests for services in remote datacenters will be delegated to the given +// remoteSource (i.e. ClientHealth). 
+func ServerHealth(deps ServerDataSourceDeps, remoteSource proxycfg.Health) proxycfg.Health { + return &serverHealth{deps, remoteSource} +} + +type serverHealth struct { + deps ServerDataSourceDeps + remoteSource proxycfg.Health +} + +func (h *serverHealth) Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + if req.Datacenter != h.deps.Datacenter { + return h.remoteSource.Notify(ctx, req, correlationID, ch) + } + + return h.deps.ViewStore.NotifyCallback( + ctx, + &healthRequest{h.deps, *req}, + correlationID, + dispatchCacheUpdate(ch), + ) +} + +type healthRequest struct { + deps ServerDataSourceDeps + req structs.ServiceSpecificRequest +} + +func (r *healthRequest) CacheInfo() cache.RequestInfo { return r.req.CacheInfo() } + +func (r *healthRequest) NewMaterializer() (submatview.Materializer, error) { + view, err := health.NewHealthView(r.req) + if err != nil { + return nil, err + } + return submatview.NewLocalMaterializer(submatview.LocalMaterializerDeps{ + Backend: r.deps.EventPublisher, + ACLResolver: r.deps.ACLResolver, + Deps: submatview.Deps{ + View: view, + Logger: r.deps.Logger, + Request: health.NewMaterializerRequest(r.req), + }, + }), nil +} + +func (r *healthRequest) Type() string { return "proxycfgglue.Health" } diff --git a/agent/proxycfg-glue/health_test.go b/agent/proxycfg-glue/health_test.go new file mode 100644 index 000000000..b4e6035ee --- /dev/null +++ b/agent/proxycfg-glue/health_test.go @@ -0,0 +1,149 @@ +package proxycfgglue + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerHealth(t *testing.T) { + t.Run("remote queries are delegated to the remote source", func(t *testing.T) { + var ( + ctx = context.Background() + req = &structs.ServiceSpecificRequest{Datacenter: "dc2"} + correlationID = "correlation-id" + ch = make(chan<- proxycfg.UpdateEvent) + result = errors.New("KABOOM") + ) + + remoteSource := newMockHealth(t) + remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result) + + dataSource := ServerHealth(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource) + err := dataSource.Notify(ctx, req, correlationID, ch) + require.Equal(t, result, err) + }) + + t.Run("local queries are served from a materialized view", func(t *testing.T) { + // Note: the view is tested more thoroughly in the agent/rpcclient/health + // package, so this is more of a high-level integration test with the local + // materializer. 
+ const ( + index uint64 = 123 + datacenter = "dc1" + serviceName = "web" + ) + + logger := testutil.Logger(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := submatview.NewStore(logger) + go store.Run(ctx) + + publisher := stream.NewEventPublisher(10 * time.Second) + publisher.RegisterHandler(pbsubscribe.Topic_ServiceHealth, + func(stream.SubscribeRequest, stream.SnapshotAppender) (uint64, error) { return index, nil }, + true) + go publisher.Run(ctx) + + dataSource := ServerHealth(ServerDataSourceDeps{ + Datacenter: datacenter, + ACLResolver: newStaticResolver(acl.ManageAll()), + ViewStore: store, + EventPublisher: publisher, + Logger: logger, + }, nil) + + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(ctx, &structs.ServiceSpecificRequest{ + Datacenter: datacenter, + ServiceName: serviceName, + }, "", eventCh)) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh) + require.Empty(t, result.Nodes) + }) + + testutil.RunStep(t, "register services", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Index: index + 1, + Topic: pbsubscribe.Topic_ServiceHealth, + Payload: &state.EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Register, + Value: &structs.CheckServiceNode{ + Node: &structs.Node{Node: "node1"}, + Service: &structs.NodeService{Service: serviceName}, + }, + }, + }, + { + Index: index + 1, + Topic: pbsubscribe.Topic_ServiceHealth, + Payload: &state.EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Register, + Value: &structs.CheckServiceNode{ + Node: &structs.Node{Node: "node2"}, + Service: &structs.NodeService{Service: serviceName}, + }, + }, + }, + }) + + result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh) + require.Len(t, result.Nodes, 2) + }) + + testutil.RunStep(t, "deregister service", func(t *testing.T) { + publisher.Publish([]stream.Event{ + { + Index: index + 2, + Topic: pbsubscribe.Topic_ServiceHealth, + Payload: &state.EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Deregister, + Value: &structs.CheckServiceNode{ + Node: &structs.Node{Node: "node2"}, + Service: &structs.NodeService{Service: serviceName}, + }, + }, + }, + }) + + result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh) + require.Len(t, result.Nodes, 1) + }) + }) +} + +func newMockHealth(t *testing.T) *mockHealth { + mock := &mockHealth{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +type mockHealth struct { + mock.Mock +} + +func (m *mockHealth) Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + return m.Called(ctx, req, correlationID, ch).Error(0) +} diff --git a/agent/rpcclient/health/health.go b/agent/rpcclient/health/health.go index dd4be64ce..a4bdae78a 100644 --- a/agent/rpcclient/health/health.go +++ b/agent/rpcclient/health/health.go @@ -136,14 +136,14 @@ func (r serviceRequest) Type() string { } func (r serviceRequest) NewMaterializer() (submatview.Materializer, error) { - view, err := newHealthView(r.ServiceSpecificRequest) + view, err := NewHealthView(r.ServiceSpecificRequest) if err != nil { return nil, err } deps := submatview.Deps{ View: view, Logger: r.deps.Logger, - Request: newMaterializerRequest(r.ServiceSpecificRequest), + Request: NewMaterializerRequest(r.ServiceSpecificRequest), } return 
submatview.NewRPCMaterializer(pbsubscribe.NewStateChangeSubscriptionClient(r.deps.Conn), deps), nil diff --git a/agent/rpcclient/health/view.go b/agent/rpcclient/health/view.go index fa591b7b7..fd19cb4a0 100644 --- a/agent/rpcclient/health/view.go +++ b/agent/rpcclient/health/view.go @@ -21,7 +21,7 @@ type MaterializerDeps struct { Logger hclog.Logger } -func newMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index uint64) *pbsubscribe.SubscribeRequest { +func NewMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index uint64) *pbsubscribe.SubscribeRequest { return func(index uint64) *pbsubscribe.SubscribeRequest { req := &pbsubscribe.SubscribeRequest{ Topic: pbsubscribe.Topic_ServiceHealth, @@ -44,29 +44,29 @@ func newMaterializerRequest(srvReq structs.ServiceSpecificRequest) func(index ui } } -func newHealthView(req structs.ServiceSpecificRequest) (*healthView, error) { +func NewHealthView(req structs.ServiceSpecificRequest) (*HealthView, error) { fe, err := newFilterEvaluator(req) if err != nil { return nil, err } - return &healthView{ + return &HealthView{ state: make(map[string]structs.CheckServiceNode), filter: fe, }, nil } -// healthView implements submatview.View for storing the view state +// HealthView implements submatview.View for storing the view state // of a service health result. We store it as a map to make updates and // deletions a little easier but we could just store a result type // (IndexedCheckServiceNodes) and update it in place for each event - that // involves re-sorting each time etc. though. -type healthView struct { +type HealthView struct { state map[string]structs.CheckServiceNode filter filterEvaluator } // Update implements View -func (s *healthView) Update(events []*pbsubscribe.Event) error { +func (s *HealthView) Update(events []*pbsubscribe.Event) error { for _, event := range events { serviceHealth := event.GetServiceHealth() if serviceHealth == nil { @@ -181,7 +181,7 @@ func sortCheckServiceNodes(serviceNodes *structs.IndexedCheckServiceNodes) { } // Result returns the structs.IndexedCheckServiceNodes stored by this view. 
-func (s *healthView) Result(index uint64) interface{} { +func (s *HealthView) Result(index uint64) interface{} { result := structs.IndexedCheckServiceNodes{ Nodes: make(structs.CheckServiceNodes, 0, len(s.state)), QueryMeta: structs.QueryMeta{ @@ -197,7 +197,7 @@ func (s *healthView) Result(index uint64) interface{} { return &result } -func (s *healthView) Reset() { +func (s *HealthView) Reset() { s.state = make(map[string]structs.CheckServiceNode) } diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index ddc2afc1a..8fcb50da3 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -602,14 +602,14 @@ type serviceRequestStub struct { } func (r serviceRequestStub) NewMaterializer() (submatview.Materializer, error) { - view, err := newHealthView(r.ServiceSpecificRequest) + view, err := NewHealthView(r.ServiceSpecificRequest) if err != nil { return nil, err } deps := submatview.Deps{ View: view, Logger: hclog.New(nil), - Request: newMaterializerRequest(r.ServiceSpecificRequest), + Request: NewMaterializerRequest(r.ServiceSpecificRequest), } return submatview.NewRPCMaterializer(r.streamClient, deps), nil } From a280c9a10b6d6688c5d30dd585bb56dbc95c0793 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 Jul 2022 11:39:27 +0100 Subject: [PATCH 007/107] proxycfg-glue: server-local implementation of `TrustBundle` and `TrustBundleList` This is the OSS portion of enterprise PR 2250. This PR provides server-local implementations of the proxycfg.TrustBundle and proxycfg.TrustBundleList interfaces, based on local blocking queries. --- agent/agent.go | 2 + agent/proxycfg-glue/glue.go | 16 +-- agent/proxycfg-glue/trust_bundle.go | 103 +++++++++++++++ agent/proxycfg-glue/trust_bundle_test.go | 152 +++++++++++++++++++++++ 4 files changed, 261 insertions(+), 12 deletions(-) create mode 100644 agent/proxycfg-glue/trust_bundle.go create mode 100644 agent/proxycfg-glue/trust_bundle_test.go diff --git a/agent/agent.go b/agent/agent.go index 5aa591f14..4a95732d7 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4251,6 +4251,8 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.Intentions = proxycfgglue.ServerIntentions(deps) sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache)) + sources.TrustBundle = proxycfgglue.ServerTrustBundle(deps) + sources.TrustBundleList = proxycfgglue.ServerTrustBundleList(deps) } a.fillEnterpriseProxyDataSources(&sources) diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index e7924010a..2bfc8d580 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -10,6 +10,7 @@ import ( cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/consul/discoverychain" + "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/watch" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" @@ -22,6 +23,9 @@ type Store interface { IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, 
*configentry.DiscoveryChainSet, error) + PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error) + PeeringTrustBundleList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error) + TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error) } // CacheCARoots satisfies the proxycfg.CARoots interface by sourcing data from @@ -102,18 +106,6 @@ func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig { return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName} } -// CacheTrustBundle satisfies the proxycfg.TrustBundle interface by sourcing -// data from the agent cache. -func CacheTrustBundle(c *cache.Cache) proxycfg.TrustBundle { - return &cacheProxyDataSource[*pbpeering.TrustBundleReadRequest]{c, cachetype.TrustBundleReadName} -} - -// CacheTrustBundleList satisfies the proxycfg.TrustBundleList interface by sourcing -// data from the agent cache. -func CacheTrustBundleList(c *cache.Cache) proxycfg.TrustBundleList { - return &cacheProxyDataSource[*pbpeering.TrustBundleListByServiceRequest]{c, cachetype.TrustBundleListName} -} - // CacheExportedPeeredServices satisfies the proxycfg.ExportedPeeredServices // interface by sourcing data from the agent cache. func CacheExportedPeeredServices(c *cache.Cache) proxycfg.ExportedPeeredServices { diff --git a/agent/proxycfg-glue/trust_bundle.go b/agent/proxycfg-glue/trust_bundle.go new file mode 100644 index 000000000..4ce42591b --- /dev/null +++ b/agent/proxycfg-glue/trust_bundle.go @@ -0,0 +1,103 @@ +package proxycfgglue + +import ( + "context" + "errors" + + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/watch" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +// CacheTrustBundle satisfies the proxycfg.TrustBundle interface by sourcing +// data from the agent cache. +func CacheTrustBundle(c *cache.Cache) proxycfg.TrustBundle { + return &cacheProxyDataSource[*pbpeering.TrustBundleReadRequest]{c, cachetype.TrustBundleReadName} +} + +// ServerTrustBundle satisfies the proxycfg.TrustBundle interface by sourcing +// data from a blocking query against the server's state store. +func ServerTrustBundle(deps ServerDataSourceDeps) proxycfg.TrustBundle { + return &serverTrustBundle{deps} +} + +type serverTrustBundle struct { + deps ServerDataSourceDeps +} + +func (s *serverTrustBundle) Notify(ctx context.Context, req *pbpeering.TrustBundleReadRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + // TODO(peering): ACL check. 
+ return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *pbpeering.TrustBundleReadResponse, error) { + index, bundle, err := store.PeeringTrustBundleRead(ws, state.Query{ + Value: req.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + }) + if err != nil { + return 0, nil, err + } + return index, &pbpeering.TrustBundleReadResponse{ + Index: index, + Bundle: bundle, + }, nil + }, + dispatchBlockingQueryUpdate[*pbpeering.TrustBundleReadResponse](ch), + ) +} + +// CacheTrustBundleList satisfies the proxycfg.TrustBundleList interface by sourcing +// data from the agent cache. +func CacheTrustBundleList(c *cache.Cache) proxycfg.TrustBundleList { + return &cacheProxyDataSource[*pbpeering.TrustBundleListByServiceRequest]{c, cachetype.TrustBundleListName} +} + +// ServerTrustBundleList satisfies the proxycfg.TrustBundle interface by +// sourcing data from a blocking query against the server's state store. +func ServerTrustBundleList(deps ServerDataSourceDeps) proxycfg.TrustBundleList { + return &serverTrustBundleList{deps} +} + +type serverTrustBundleList struct { + deps ServerDataSourceDeps +} + +func (s *serverTrustBundleList) Notify(ctx context.Context, req *pbpeering.TrustBundleListByServiceRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + entMeta := acl.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace) + + // TODO(peering): ACL check. + return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *pbpeering.TrustBundleListByServiceResponse, error) { + var ( + index uint64 + bundles []*pbpeering.PeeringTrustBundle + err error + ) + switch { + case req.ServiceName != "": + index, bundles, err = store.TrustBundleListByService(ws, req.ServiceName, s.deps.Datacenter, entMeta) + case req.Kind == string(structs.ServiceKindMeshGateway): + index, bundles, err = store.PeeringTrustBundleList(ws, entMeta) + case req.Kind != "": + err = errors.New("kind must be mesh-gateway if set") + default: + err = errors.New("one of service or kind is required") + } + if err != nil { + return 0, nil, err + } + + return index, &pbpeering.TrustBundleListByServiceResponse{ + Index: index, + Bundles: bundles, + }, nil + }, + dispatchBlockingQueryUpdate[*pbpeering.TrustBundleListByServiceResponse](ch), + ) +} diff --git a/agent/proxycfg-glue/trust_bundle_test.go b/agent/proxycfg-glue/trust_bundle_test.go new file mode 100644 index 000000000..65c343a05 --- /dev/null +++ b/agent/proxycfg-glue/trust_bundle_test.go @@ -0,0 +1,152 @@ +package proxycfgglue + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerTrustBundle(t *testing.T) { + const ( + index uint64 = 123 + peerName = "peer1" + ) + + store := state.NewStateStore(nil) + + require.NoError(t, store.PeeringTrustBundleWrite(index, &pbpeering.PeeringTrustBundle{ + PeerName: peerName, + TrustDomain: "before.com", + })) + + dataSource := ServerTrustBundle(ServerDataSourceDeps{ + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + err := dataSource.Notify(context.Background(), &pbpeering.TrustBundleReadRequest{ + Name: peerName, + }, "", 
eventCh) + require.NoError(t, err) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*pbpeering.TrustBundleReadResponse](t, eventCh) + require.Equal(t, "before.com", result.Bundle.TrustDomain) + }) + + testutil.RunStep(t, "update trust bundle", func(t *testing.T) { + require.NoError(t, store.PeeringTrustBundleWrite(index+1, &pbpeering.PeeringTrustBundle{ + PeerName: peerName, + TrustDomain: "after.com", + })) + + result := getEventResult[*pbpeering.TrustBundleReadResponse](t, eventCh) + require.Equal(t, "after.com", result.Bundle.TrustDomain) + }) +} + +func TestServerTrustBundleList(t *testing.T) { + const index uint64 = 123 + + t.Run("list by service", func(t *testing.T) { + const ( + serviceName = "web" + us = "default" + them = "peer2" + ) + + store := state.NewStateStore(nil) + require.NoError(t, store.CASetConfig(index, &structs.CAConfiguration{ClusterID: "cluster-id"})) + + testutil.RunStep(t, "export service to peer", func(t *testing.T) { + require.NoError(t, store.PeeringWrite(index, &pbpeering.Peering{ + ID: testUUID(t), + Name: them, + State: pbpeering.PeeringState_ACTIVE, + })) + + require.NoError(t, store.PeeringTrustBundleWrite(index, &pbpeering.PeeringTrustBundle{ + PeerName: them, + })) + + require.NoError(t, store.EnsureConfigEntry(index, &structs.ExportedServicesConfigEntry{ + Name: us, + Services: []structs.ExportedService{ + { + Name: serviceName, + Consumers: []structs.ServiceConsumer{ + {PeerName: them}, + }, + }, + }, + })) + }) + + dataSource := ServerTrustBundleList(ServerDataSourceDeps{ + Datacenter: "dc1", + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + err := dataSource.Notify(context.Background(), &pbpeering.TrustBundleListByServiceRequest{ + ServiceName: serviceName, + Partition: us, + }, "", eventCh) + require.NoError(t, err) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*pbpeering.TrustBundleListByServiceResponse](t, eventCh) + require.Len(t, result.Bundles, 1) + }) + + testutil.RunStep(t, "unexport the service", func(t *testing.T) { + require.NoError(t, store.EnsureConfigEntry(index+1, &structs.ExportedServicesConfigEntry{ + Name: us, + Services: []structs.ExportedService{}, + })) + + result := getEventResult[*pbpeering.TrustBundleListByServiceResponse](t, eventCh) + require.Len(t, result.Bundles, 0) + }) + }) + + t.Run("list for mesh gateway", func(t *testing.T) { + store := state.NewStateStore(nil) + require.NoError(t, store.CASetConfig(index, &structs.CAConfiguration{ClusterID: "cluster-id"})) + + require.NoError(t, store.PeeringTrustBundleWrite(index, &pbpeering.PeeringTrustBundle{ + PeerName: "peer1", + })) + require.NoError(t, store.PeeringTrustBundleWrite(index, &pbpeering.PeeringTrustBundle{ + PeerName: "peer2", + })) + + dataSource := ServerTrustBundleList(ServerDataSourceDeps{ + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + err := dataSource.Notify(context.Background(), &pbpeering.TrustBundleListByServiceRequest{ + Kind: string(structs.ServiceKindMeshGateway), + Partition: "default", + }, "", eventCh) + require.NoError(t, err) + + result := getEventResult[*pbpeering.TrustBundleListByServiceResponse](t, eventCh) + require.Len(t, result.Bundles, 2) + }) +} + +func testUUID(t *testing.T) string { + v, err := lib.GenerateUUID(nil) + require.NoError(t, err) + return v +} From a5a6102a3b69e74f2fbbdc8317f77abf37d73883 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 
Jul 2022 11:41:29 +0100 Subject: [PATCH 008/107] proxycfg-glue: server-local implementation of `GatewayServices` This is the OSS portion of enterprise PR 2259. This PR provides a server-local implementation of the proxycfg.GatewayServices interface based on blocking queries. --- agent/agent.go | 1 + agent/proxycfg-glue/gateway_services.go | 63 ++++++++ agent/proxycfg-glue/gateway_services_test.go | 155 +++++++++++++++++++ agent/proxycfg-glue/glue.go | 7 +- agent/proxycfg-glue/helpers_test.go | 14 ++ 5 files changed, 234 insertions(+), 6 deletions(-) create mode 100644 agent/proxycfg-glue/gateway_services.go create mode 100644 agent/proxycfg-glue/gateway_services_test.go diff --git a/agent/agent.go b/agent/agent.go index 4a95732d7..59827e021 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4247,6 +4247,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps) sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps) sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache)) + sources.GatewayServices = proxycfgglue.ServerGatewayServices(deps) sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth)) sources.Intentions = proxycfgglue.ServerIntentions(deps) sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) diff --git a/agent/proxycfg-glue/gateway_services.go b/agent/proxycfg-glue/gateway_services.go new file mode 100644 index 000000000..8c90f949d --- /dev/null +++ b/agent/proxycfg-glue/gateway_services.go @@ -0,0 +1,63 @@ +package proxycfgglue + +import ( + "context" + + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/consul/watch" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/structs/aclfilter" +) + +// CacheGatewayServices satisfies the proxycfg.GatewayServices interface by +// sourcing data from the agent cache. +func CacheGatewayServices(c *cache.Cache) proxycfg.GatewayServices { + return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.GatewayServicesName} +} + +// ServerGatewayServices satisfies the proxycfg.GatewayServices interface by +// sourcing data from a blocking query against the server's state store. 
+func ServerGatewayServices(deps ServerDataSourceDeps) proxycfg.GatewayServices { + return &serverGatewayServices{deps} +} + +type serverGatewayServices struct { + deps ServerDataSourceDeps +} + +func (s *serverGatewayServices) Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedGatewayServices, error) { + var authzContext acl.AuthorizerContext + authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, &authzContext) + if err != nil { + return 0, nil, err + } + if err := authz.ToAllowAuthorizer().ServiceReadAllowed(req.ServiceName, &authzContext); err != nil { + return 0, nil, err + } + + index, services, err := store.GatewayServices(ws, req.ServiceName, &req.EnterpriseMeta) + if err != nil { + return 0, nil, err + } + + response := &structs.IndexedGatewayServices{ + Services: services, + QueryMeta: structs.QueryMeta{ + Backend: structs.QueryBackendBlocking, + Index: index, + }, + } + aclfilter.New(authz, s.deps.Logger).Filter(response) + + return index, response, nil + }, + dispatchBlockingQueryUpdate[*structs.IndexedGatewayServices](ch), + ) +} diff --git a/agent/proxycfg-glue/gateway_services_test.go b/agent/proxycfg-glue/gateway_services_test.go new file mode 100644 index 000000000..bb20f489d --- /dev/null +++ b/agent/proxycfg-glue/gateway_services_test.go @@ -0,0 +1,155 @@ +package proxycfgglue + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerGatewayServices(t *testing.T) { + const index uint64 = 123 + + t.Run("ingress gateway", func(t *testing.T) { + store := state.NewStateStore(nil) + + authz := policyAuthorizer(t, ` + service "igw" { policy = "read" } + service "web" { policy = "read" } + service "db" { policy = "read" } + `) + + require.NoError(t, store.EnsureConfigEntry(index, &structs.IngressGatewayConfigEntry{ + Name: "igw", + Listeners: []structs.IngressListener{ + { + Protocol: "tcp", + Services: []structs.IngressService{ + {Name: "web"}, + }, + }, + { + Protocol: "tcp", + Services: []structs.IngressService{ + {Name: "db"}, + }, + }, + { + Protocol: "tcp", + Services: []structs.IngressService{ + {Name: "no-access"}, + }, + }, + }, + })) + + dataSource := ServerGatewayServices(ServerDataSourceDeps{ + ACLResolver: newStaticResolver(authz), + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(context.Background(), &structs.ServiceSpecificRequest{ServiceName: "igw"}, "", eventCh)) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*structs.IndexedGatewayServices](t, eventCh) + require.Len(t, result.Services, 2) + }) + + testutil.RunStep(t, "remove service mapping", func(t *testing.T) { + require.NoError(t, store.EnsureConfigEntry(index+1, &structs.IngressGatewayConfigEntry{ + Name: "igw", + Listeners: []structs.IngressListener{ + { + Protocol: "tcp", + Services: []structs.IngressService{ + {Name: "web"}, + }, + }, + }, + })) + + result := getEventResult[*structs.IndexedGatewayServices](t, eventCh) + require.Len(t, result.Services, 1) + }) + }) + + 
t.Run("terminating gateway", func(t *testing.T) { + store := state.NewStateStore(nil) + + authz := policyAuthorizer(t, ` + service "tgw" { policy = "read" } + service "web" { policy = "read" } + service "db" { policy = "read" } + `) + + require.NoError(t, store.EnsureConfigEntry(index, &structs.TerminatingGatewayConfigEntry{ + Name: "tgw", + Services: []structs.LinkedService{ + {Name: "web"}, + {Name: "db"}, + {Name: "no-access"}, + }, + })) + + dataSource := ServerGatewayServices(ServerDataSourceDeps{ + ACLResolver: newStaticResolver(authz), + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(context.Background(), &structs.ServiceSpecificRequest{ServiceName: "tgw"}, "", eventCh)) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*structs.IndexedGatewayServices](t, eventCh) + require.Len(t, result.Services, 2) + }) + + testutil.RunStep(t, "remove service mapping", func(t *testing.T) { + require.NoError(t, store.EnsureConfigEntry(index+1, &structs.TerminatingGatewayConfigEntry{ + Name: "tgw", + Services: []structs.LinkedService{ + {Name: "web"}, + }, + })) + + result := getEventResult[*structs.IndexedGatewayServices](t, eventCh) + require.Len(t, result.Services, 1) + }) + }) + + t.Run("no access to gateway", func(t *testing.T) { + store := state.NewStateStore(nil) + + authz := policyAuthorizer(t, ` + service "tgw" { policy = "deny" } + service "web" { policy = "read" } + service "db" { policy = "read" } + `) + + require.NoError(t, store.EnsureConfigEntry(index, &structs.TerminatingGatewayConfigEntry{ + Name: "tgw", + Services: []structs.LinkedService{ + {Name: "web"}, + {Name: "db"}, + }, + })) + + dataSource := ServerGatewayServices(ServerDataSourceDeps{ + ACLResolver: newStaticResolver(authz), + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(context.Background(), &structs.ServiceSpecificRequest{ServiceName: "tgw"}, "", eventCh)) + + err := getEventError(t, eventCh) + require.True(t, acl.IsErrPermissionDenied(err), "expected permission denied error") + }) +} diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 2bfc8d580..9fb064890 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -21,6 +21,7 @@ import ( type Store interface { watch.StateStore + GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error) IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error) @@ -58,12 +59,6 @@ func CacheFederationStateListMeshGateways(c *cache.Cache) proxycfg.FederationSta return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.FederationStateListMeshGatewaysName} } -// CacheGatewayServices satisfies the proxycfg.GatewayServices interface by -// sourcing data from the agent cache. 
-func CacheGatewayServices(c *cache.Cache) proxycfg.GatewayServices { - return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.GatewayServicesName} -} - // CacheHTTPChecks satisifies the proxycfg.HTTPChecks interface by sourcing // data from the agent cache. func CacheHTTPChecks(c *cache.Cache) proxycfg.HTTPChecks { diff --git a/agent/proxycfg-glue/helpers_test.go b/agent/proxycfg-glue/helpers_test.go index 5528ed4a5..7e3b9078e 100644 --- a/agent/proxycfg-glue/helpers_test.go +++ b/agent/proxycfg-glue/helpers_test.go @@ -32,3 +32,17 @@ func expectNoEvent(t *testing.T, eventCh <-chan proxycfg.UpdateEvent) { case <-time.After(100 * time.Millisecond): } } + +func getEventError(t *testing.T, eventCh <-chan proxycfg.UpdateEvent) error { + t.Helper() + + select { + case event := <-eventCh: + require.Error(t, event.Err) + return event.Err + case <-time.After(100 * time.Millisecond): + t.Fatal("timeout waiting for event") + } + + panic("this should never be reached") +} From 7f69e279269bdff0360ca4341e2b11a48b480ab3 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 Jul 2022 11:43:42 +0100 Subject: [PATCH 009/107] proxycfg-glue: server-local implementation of `FederationStateListMeshGateways` This is the OSS portion of enterprise PR 2265. This PR provides a server-local implementation of the proxycfg.FederationStateListMeshGateways interface based on blocking queries. --- agent/agent.go | 1 + .../federation_state_list_mesh_gateways.go | 67 ++++++++++++ ...ederation_state_list_mesh_gateways_test.go | 103 ++++++++++++++++++ agent/proxycfg-glue/glue.go | 7 +- 4 files changed, 172 insertions(+), 6 deletions(-) create mode 100644 agent/proxycfg-glue/federation_state_list_mesh_gateways.go create mode 100644 agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go diff --git a/agent/agent.go b/agent/agent.go index 59827e021..44157a91f 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4247,6 +4247,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps) sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps) sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache)) + sources.FederationStateListMeshGateways = proxycfgglue.ServerFederationStateListMeshGateways(deps) sources.GatewayServices = proxycfgglue.ServerGatewayServices(deps) sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth)) sources.Intentions = proxycfgglue.ServerIntentions(deps) diff --git a/agent/proxycfg-glue/federation_state_list_mesh_gateways.go b/agent/proxycfg-glue/federation_state_list_mesh_gateways.go new file mode 100644 index 000000000..ea3640ad9 --- /dev/null +++ b/agent/proxycfg-glue/federation_state_list_mesh_gateways.go @@ -0,0 +1,67 @@ +package proxycfgglue + +import ( + "context" + + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/consul/watch" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/structs/aclfilter" +) + +// CacheFederationStateListMeshGateways satisfies the proxycfg.FederationStateListMeshGateways +// interface by sourcing data from the agent cache. 
+func CacheFederationStateListMeshGateways(c *cache.Cache) proxycfg.FederationStateListMeshGateways { + return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.FederationStateListMeshGatewaysName} +} + +// ServerFederationStateListMeshGateways satisfies the proxycfg.FederationStateListMeshGateways +// interface by sourcing data from a blocking query against the server's state +// store. +func ServerFederationStateListMeshGateways(deps ServerDataSourceDeps) proxycfg.FederationStateListMeshGateways { + return &serverFederationStateListMeshGateways{deps} +} + +type serverFederationStateListMeshGateways struct { + deps ServerDataSourceDeps +} + +func (s *serverFederationStateListMeshGateways) Notify(ctx context.Context, req *structs.DCSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *structs.DatacenterIndexedCheckServiceNodes, error) { + authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil) + if err != nil { + return 0, nil, err + } + + index, fedStates, err := store.FederationStateList(ws) + if err != nil { + return 0, nil, err + } + + results := make(map[string]structs.CheckServiceNodes) + for _, fs := range fedStates { + if gws := fs.MeshGateways; len(gws) != 0 { + // Shallow clone to prevent ACL filtering manipulating the slice in memdb. + results[fs.Datacenter] = gws.ShallowClone() + } + } + + rsp := &structs.DatacenterIndexedCheckServiceNodes{ + DatacenterNodes: results, + QueryMeta: structs.QueryMeta{ + Index: index, + Backend: structs.QueryBackendBlocking, + }, + } + aclfilter.New(authz, s.deps.Logger).Filter(rsp) + + return index, rsp, nil + }, + dispatchBlockingQueryUpdate[*structs.DatacenterIndexedCheckServiceNodes](ch), + ) +} diff --git a/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go b/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go new file mode 100644 index 000000000..5c716d24c --- /dev/null +++ b/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go @@ -0,0 +1,103 @@ +package proxycfgglue + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerFederationStateListMeshGateways(t *testing.T) { + const index uint64 = 123 + + store := state.NewStateStore(nil) + + authz := policyAuthorizer(t, ` + service_prefix "dc2-" { policy = "read" } + node_prefix "dc2-" { policy = "read" } + + service_prefix "dc3-" { policy = "read" } + node_prefix "dc3-" { policy = "read" } + `) + + require.NoError(t, store.FederationStateSet(index, &structs.FederationState{ + Datacenter: "dc2", + MeshGateways: structs.CheckServiceNodes{ + { + Service: &structs.NodeService{Service: "dc2-gw1"}, + Node: &structs.Node{Node: "dc2-gw1"}, + }, + }, + })) + + // No access to this DC, we shouldn't see it in results. 
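	// (The filtering is applied server-side: aclfilter.New(authz, ...).Filter in
	// ServerFederationStateListMeshGateways strips entries the token cannot read,
	// so the dc4 gateway registered below never appears in any event payload; the
	// assertions that follow only ever expect dc2 and, later, dc3.)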
+ require.NoError(t, store.FederationStateSet(index, &structs.FederationState{ + Datacenter: "dc4", + MeshGateways: structs.CheckServiceNodes{ + { + Service: &structs.NodeService{Service: "dc4-gw1"}, + Node: &structs.Node{Node: "dc4-gw1"}, + }, + }, + })) + + dataSource := ServerFederationStateListMeshGateways(ServerDataSourceDeps{ + ACLResolver: newStaticResolver(authz), + GetStore: func() Store { return store }, + }) + + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(context.Background(), &structs.DCSpecificRequest{Datacenter: "dc1"}, "", eventCh)) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*structs.DatacenterIndexedCheckServiceNodes](t, eventCh) + require.Equal(t, map[string]structs.CheckServiceNodes{ + "dc2": { + { + Service: &structs.NodeService{Service: "dc2-gw1"}, + Node: &structs.Node{Node: "dc2-gw1"}, + }, + }, + }, result.DatacenterNodes) + }) + + testutil.RunStep(t, "add new datacenter", func(t *testing.T) { + require.NoError(t, store.FederationStateSet(index+1, &structs.FederationState{ + Datacenter: "dc3", + MeshGateways: structs.CheckServiceNodes{ + { + Service: &structs.NodeService{Service: "dc3-gw1"}, + Node: &structs.Node{Node: "dc3-gw1"}, + }, + }, + })) + + result := getEventResult[*structs.DatacenterIndexedCheckServiceNodes](t, eventCh) + require.Equal(t, map[string]structs.CheckServiceNodes{ + "dc2": { + { + Service: &structs.NodeService{Service: "dc2-gw1"}, + Node: &structs.Node{Node: "dc2-gw1"}, + }, + }, + "dc3": { + { + Service: &structs.NodeService{Service: "dc3-gw1"}, + Node: &structs.Node{Node: "dc3-gw1"}, + }, + }, + }, result.DatacenterNodes) + }) + + testutil.RunStep(t, "delete datacenter", func(t *testing.T) { + require.NoError(t, store.FederationStateDelete(index+2, "dc3")) + + result := getEventResult[*structs.DatacenterIndexedCheckServiceNodes](t, eventCh) + require.NotContains(t, result.DatacenterNodes, "dc3") + }) +} diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 9fb064890..06798939b 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -21,6 +21,7 @@ import ( type Store interface { watch.StateStore + FederationStateList(ws memdb.WatchSet) (uint64, []*structs.FederationState, error) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error) IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) @@ -53,12 +54,6 @@ func CacheDatacenters(c *cache.Cache) proxycfg.Datacenters { return &cacheProxyDataSource[*structs.DatacentersRequest]{c, cachetype.CatalogDatacentersName} } -// CacheFederationStateListMeshGateways satisfies the proxycfg.FederationStateListMeshGateways -// interface by sourcing data from the agent cache. -func CacheFederationStateListMeshGateways(c *cache.Cache) proxycfg.FederationStateListMeshGateways { - return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.FederationStateListMeshGatewaysName} -} - // CacheHTTPChecks satisifies the proxycfg.HTTPChecks interface by sourcing // data from the agent cache. 
func CacheHTTPChecks(c *cache.Cache) proxycfg.HTTPChecks { From ac4ac1b0627d1e06bc2b4c2bf5969e8174df9afb Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 12 Jul 2022 11:50:48 +0100 Subject: [PATCH 010/107] Changelog entry --- .changelog/13722.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/13722.txt diff --git a/.changelog/13722.txt b/.changelog/13722.txt new file mode 100644 index 000000000..2cf90aa37 --- /dev/null +++ b/.changelog/13722.txt @@ -0,0 +1,3 @@ +```release-note:feature +streaming: Added topic that can be used to consume updates about the list of services in a datacenter +``` From 8b9a1263864a0b902a77d8e7dc2b0df473df5222 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Thu, 14 Jul 2022 20:32:53 +0200 Subject: [PATCH 011/107] ui: remove with-peers query param (#13756) * Don't request nodes/services `with-peers` anymore This will be automatic - no need for the query-param anymore. * Return peering data based on feature flag mock-api services/nodes * Update tests to reflect removed with-peers query-param * setup cookie for turning peer feature flag on in mock-api in testing * Add missing `S` for renamed PEERING feature-flag cookie --- ui/packages/consul-ui/app/adapters/node.js | 14 -------------- ui/packages/consul-ui/app/adapters/service.js | 14 -------------- .../consul-ui/mock-api/v1/internal/ui/nodes | 2 +- .../consul-ui/mock-api/v1/internal/ui/services | 2 +- .../acceptance/dc/intentions/navigation.feature | 2 +- .../tests/acceptance/dc/nodes/index.feature | 2 +- .../tests/acceptance/dc/services/list.feature | 2 +- .../tests/acceptance/page-navigation.feature | 2 +- .../tests/acceptance/token-header.feature | 4 ++-- ui/packages/consul-ui/tests/helpers/set-cookies.js | 3 +++ .../tests/integration/adapters/node-test.js | 2 +- .../tests/integration/adapters/service-test.js | 2 +- 12 files changed, 13 insertions(+), 38 deletions(-) diff --git a/ui/packages/consul-ui/app/adapters/node.js b/ui/packages/consul-ui/app/adapters/node.js index 6e30108c7..de3f3c295 100644 --- a/ui/packages/consul-ui/app/adapters/node.js +++ b/ui/packages/consul-ui/app/adapters/node.js @@ -1,5 +1,4 @@ import Adapter from './application'; -import { inject as service } from '@ember/service'; // TODO: Update to use this.formatDatacenter() @@ -11,25 +10,12 @@ import { inject as service } from '@ember/service'; // to the node. 
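// With peering, the API returns peer data by default, so this adapter no longer
// opts in with a `with-peers` query param; the peeringQuery getter removed below
// was the only reason the abilities service was injected here.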
export default class NodeAdapter extends Adapter { - @service abilities; - - get peeringQuery() { - const query = {}; - - if (this.abilities.can('use peers')) { - query['with-peers'] = true; - } - - return query; - } - requestForQuery(request, { dc, ns, partition, index, id, uri }) { return request` GET /v1/internal/ui/nodes?${{ dc }} X-Request-ID: ${uri} ${{ - ...this.peeringQuery, ns, partition, index, diff --git a/ui/packages/consul-ui/app/adapters/service.js b/ui/packages/consul-ui/app/adapters/service.js index 7507c41b6..ea69f0927 100644 --- a/ui/packages/consul-ui/app/adapters/service.js +++ b/ui/packages/consul-ui/app/adapters/service.js @@ -1,19 +1,6 @@ import Adapter from './application'; -import { inject as service } from '@ember/service'; export default class ServiceAdapter extends Adapter { - @service abilities; - - get peeringQuery() { - const query = {}; - - if (this.abilities.can('use peers')) { - query['with-peers'] = true; - } - - return query; - } - requestForQuery(request, { dc, ns, partition, index, gateway, uri }) { if (typeof gateway !== 'undefined') { return request` @@ -33,7 +20,6 @@ export default class ServiceAdapter extends Adapter { X-Request-ID: ${uri} ${{ - ...this.peeringQuery, ns, partition, index, diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes b/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes index 30557b643..8b63b7997 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes @@ -17,7 +17,7 @@ { "ID":"${fake.random.uuid()}", "Node":"node-${i}", - ${location.search["with-peers"] ? peerNameString : ''} + ${env('CONSUL_PEERINGS_ENABLE') ? peerNameString : ''} "Address":"${fake.internet.ip()}", "TaggedAddresses":{ "lan":"${fake.internet.ip()}", diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/services b/ui/packages/consul-ui/mock-api/v1/internal/ui/services index a9d0c98cc..1a4a73250 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/services +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/services @@ -53,7 +53,7 @@ ${typeof location.search.ns !== 'undefined' ? ` ${typeof location.search.partition !== 'undefined' ? ` "Partition": "${fake.helpers.randomize([env('CONSUL_PARTITION_EXPORTER', location.search.partition), location.search.partition])}", ` : ``} - ${location.search['with-peers'] ? peerNameString : ''} + ${env('CONSUL_PEERINGS_ENABLE') ? 
peerNameString : ''} "Tags": [ ${ range( diff --git a/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature b/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature index 8c749634d..4b1bd2318 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature @@ -27,7 +27,7 @@ Feature: dc / intentions / navigation ID: 755b72bd-f5ab-4c92-90cc-bed0e7d8e9f0 --- When I click intention on the intentionList.intentions component - Then a GET request was made to "/v1/internal/ui/services?dc=dc-1&with-peers=true&ns=*" + Then a GET request was made to "/v1/internal/ui/services?dc=dc-1&ns=*" And I click "[data-test-back]" Then the url should be /dc-1/intentions Scenario: Clicking the create button and back again diff --git a/ui/packages/consul-ui/tests/acceptance/dc/nodes/index.feature b/ui/packages/consul-ui/tests/acceptance/dc/nodes/index.feature index a56758958..735c222e9 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/nodes/index.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/nodes/index.feature @@ -45,7 +45,7 @@ Feature: dc / nodes / index --- Then the url should be /dc-1/nodes And the title should be "Nodes - Consul" - And a GET request was made to "/v1/internal/ui/nodes?dc=dc-1&with-peers=true&ns=@namespace" + And a GET request was made to "/v1/internal/ui/nodes?dc=dc-1&ns=@namespace" Then I see 3 node models Scenario: Seeing the leader in node listing Given 3 node models from yaml diff --git a/ui/packages/consul-ui/tests/acceptance/dc/services/list.feature b/ui/packages/consul-ui/tests/acceptance/dc/services/list.feature index fe1682d25..60f6ed4b8 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/services/list.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/services/list.feature @@ -16,7 +16,7 @@ Feature: dc / services / list dc: dc-1 --- Then the url should be /dc-1/services - And a GET request was made to "/v1/internal/ui/services?dc=dc-1&with-peers=true&ns=@namespace" + And a GET request was made to "/v1/internal/ui/services?dc=dc-1&ns=@namespace" Then I see 3 service models diff --git a/ui/packages/consul-ui/tests/acceptance/page-navigation.feature b/ui/packages/consul-ui/tests/acceptance/page-navigation.feature index 62d259c76..3c064b1de 100644 --- a/ui/packages/consul-ui/tests/acceptance/page-navigation.feature +++ b/ui/packages/consul-ui/tests/acceptance/page-navigation.feature @@ -22,7 +22,7 @@ Feature: page-navigation Where: --------------------------------------------------------------------------------------------------- | Link | URL | Endpoint | - | nodes | /dc1/nodes | /v1/internal/ui/nodes?dc=dc1&with-peers=true&ns=@namespace | + | nodes | /dc1/nodes | /v1/internal/ui/nodes?dc=dc1&ns=@namespace | # FIXME # | kvs | /dc1/kv | /v1/kv/?keys&dc=dc1&separator=%2F&ns=@namespace | | tokens | /dc1/acls/tokens | /v1/acl/tokens?dc=dc1&ns=@namespace | diff --git a/ui/packages/consul-ui/tests/acceptance/token-header.feature b/ui/packages/consul-ui/tests/acceptance/token-header.feature index 948611441..4fd73cd62 100644 --- a/ui/packages/consul-ui/tests/acceptance/token-header.feature +++ b/ui/packages/consul-ui/tests/acceptance/token-header.feature @@ -11,7 +11,7 @@ Feature: token-header dc: dc1 --- Then the url should be /dc1/services - And a GET request was made to "/v1/internal/ui/services?dc=dc1&with-peers=true&ns=@namespace" from yaml + And a GET request was made to "/v1/internal/ui/services?dc=dc1&ns=@namespace" from yaml --- 
headers: X-Consul-Token: '' @@ -35,7 +35,7 @@ Feature: token-header dc: dc1 --- Then the url should be /dc1/services - And a GET request was made to "/v1/internal/ui/services?dc=dc1&with-peers=true&ns=@namespace" from yaml + And a GET request was made to "/v1/internal/ui/services?dc=dc1&ns=@namespace" from yaml --- headers: X-Consul-Token: [Token] diff --git a/ui/packages/consul-ui/tests/helpers/set-cookies.js b/ui/packages/consul-ui/tests/helpers/set-cookies.js index 053bd9090..1a02c4a10 100644 --- a/ui/packages/consul-ui/tests/helpers/set-cookies.js +++ b/ui/packages/consul-ui/tests/helpers/set-cookies.js @@ -5,6 +5,9 @@ export default function(type, value, doc = document) { if (!doc.cookie.includes('CONSUL_ACLS_ENABLE=0')) { obj['CONSUL_ACLS_ENABLE'] = 1; } + if (!doc.cookie.includes('CONSUL_PEERINGS_ENABLE=0')) { + obj['CONSUL_PEERINGS_ENABLE'] = 1; + } switch (type) { case 'dc': key = 'CONSUL_DATACENTER_COUNT'; diff --git a/ui/packages/consul-ui/tests/integration/adapters/node-test.js b/ui/packages/consul-ui/tests/integration/adapters/node-test.js index 5904fb3f5..32f1e8eac 100644 --- a/ui/packages/consul-ui/tests/integration/adapters/node-test.js +++ b/ui/packages/consul-ui/tests/integration/adapters/node-test.js @@ -14,7 +14,7 @@ module('Integration | Adapter | node', function(hooks) { const adapter = this.owner.lookup('adapter:node'); const client = this.owner.lookup('service:client/http'); const request = client.requestParams.bind(client); - const expected = `GET /v1/internal/ui/nodes?dc=${dc}&with-peers=true${ + const expected = `GET /v1/internal/ui/nodes?dc=${dc}${ shouldHaveNspace(nspace) ? `&ns=${nspace}` : `` }`; const actual = adapter.requestForQuery(request, { diff --git a/ui/packages/consul-ui/tests/integration/adapters/service-test.js b/ui/packages/consul-ui/tests/integration/adapters/service-test.js index 853ecbff8..adf46258d 100644 --- a/ui/packages/consul-ui/tests/integration/adapters/service-test.js +++ b/ui/packages/consul-ui/tests/integration/adapters/service-test.js @@ -14,7 +14,7 @@ module('Integration | Adapter | service', function(hooks) { const adapter = this.owner.lookup('adapter:service'); const client = this.owner.lookup('service:client/http'); const request = client.requestParams.bind(client); - const expected = `GET /v1/internal/ui/services?dc=${dc}&with-peers=true${ + const expected = `GET /v1/internal/ui/services?dc=${dc}${ shouldHaveNspace(nspace) ? 
`&ns=${nspace}` : `` }`; let actual = adapter.requestForQuery(request, { From 084f9d708420682254c9cdf7ca8d2444be3e73ad Mon Sep 17 00:00:00 2001 From: Dan Stough Date: Thu, 14 Jul 2022 14:45:51 -0400 Subject: [PATCH 012/107] feat: connect proxy xDS for destinations Signed-off-by: Dhia Ayachi --- agent/agent.go | 4 + agent/cache-types/service_gateways.go | 52 ++ agent/cache-types/service_gateways_test.go | 57 +++ agent/consul/internal_endpoint.go | 50 ++ agent/consul/internal_endpoint_test.go | 476 ++++++++++++++++++ agent/consul/state/catalog.go | 21 +- agent/consul/state/catalog_test.go | 395 +++++++++++++++ agent/proxycfg-glue/glue.go | 12 + agent/proxycfg/connect_proxy.go | 88 ++++ agent/proxycfg/data_sources.go | 21 +- agent/proxycfg/internal/watch/watchmap.go | 15 + .../proxycfg/internal/watch/watchmap_test.go | 41 ++ agent/proxycfg/manager_test.go | 4 + agent/proxycfg/snapshot.go | 5 + agent/proxycfg/state.go | 3 + agent/proxycfg/state_test.go | 217 +++++++- agent/proxycfg/testing.go | 43 +- agent/proxycfg/testing_terminating_gateway.go | 60 ++- agent/proxycfg/testing_tproxy.go | 115 +++++ agent/xds/clusters.go | 195 ++++--- agent/xds/clusters_test.go | 6 - agent/xds/endpoints.go | 19 +- agent/xds/listeners.go | 204 ++++---- agent/xds/listeners_test.go | 6 - agent/xds/rbac.go | 14 +- agent/xds/resources_test.go | 16 + agent/xds/routes.go | 105 ++-- ...ransparent-proxy-destination.latest.golden | 255 ++++++++++ ...ng-gateway-destinations-only.latest.golden | 105 +++- ...ransparent-proxy-destination.latest.golden | 119 +++++ ...ng-gateway-destinations-only.latest.golden | 5 + ...ransparent-proxy-destination.latest.golden | 185 +++++++ ...roxy-dial-instances-directly.latest.golden | 24 +- ...nsparent-proxy-http-upstream.latest.golden | 24 +- ...ng-gateway-destinations-only.latest.golden | 202 +++++++- .../listeners/transparent-proxy.latest.golden | 24 +- ...ransparent-proxy-destination.latest.golden | 5 + ...ng-gateway-destinations-only.latest.golden | 53 ++ 38 files changed, 2897 insertions(+), 348 deletions(-) create mode 100644 agent/cache-types/service_gateways.go create mode 100644 agent/cache-types/service_gateways_test.go create mode 100644 agent/xds/testdata/clusters/transparent-proxy-destination.latest.golden create mode 100644 agent/xds/testdata/endpoints/transparent-proxy-destination.latest.golden create mode 100644 agent/xds/testdata/endpoints/transparent-proxy-terminating-gateway-destinations-only.latest.golden create mode 100644 agent/xds/testdata/listeners/transparent-proxy-destination.latest.golden create mode 100644 agent/xds/testdata/routes/transparent-proxy-destination.latest.golden create mode 100644 agent/xds/testdata/routes/transparent-proxy-terminating-gateway-destinations-only.latest.golden diff --git a/agent/agent.go b/agent/agent.go index 44157a91f..5412436e5 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4075,6 +4075,7 @@ func (a *Agent) registerCache() { a.cache.RegisterType(cachetype.IntentionMatchName, &cachetype.IntentionMatch{RPC: a}) a.cache.RegisterType(cachetype.IntentionUpstreamsName, &cachetype.IntentionUpstreams{RPC: a}) + a.cache.RegisterType(cachetype.IntentionUpstreamsDestinationName, &cachetype.IntentionUpstreamsDestination{RPC: a}) a.cache.RegisterType(cachetype.CatalogServicesName, &cachetype.CatalogServices{RPC: a}) @@ -4097,6 +4098,7 @@ func (a *Agent) registerCache() { a.cache.RegisterType(cachetype.CompiledDiscoveryChainName, &cachetype.CompiledDiscoveryChain{RPC: a}) a.cache.RegisterType(cachetype.GatewayServicesName, 
&cachetype.GatewayServices{RPC: a}) + a.cache.RegisterType(cachetype.ServiceGatewaysName, &cachetype.ServiceGateways{RPC: a}) a.cache.RegisterType(cachetype.ConfigEntryListName, &cachetype.ConfigEntryList{RPC: a}) @@ -4220,10 +4222,12 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { Datacenters: proxycfgglue.CacheDatacenters(a.cache), FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache), GatewayServices: proxycfgglue.CacheGatewayServices(a.cache), + ServiceGateways: proxycfgglue.CacheServiceGateways(a.cache), Health: proxycfgglue.ClientHealth(a.rpcClientHealth), HTTPChecks: proxycfgglue.CacheHTTPChecks(a.cache), Intentions: proxycfgglue.CacheIntentions(a.cache), IntentionUpstreams: proxycfgglue.CacheIntentionUpstreams(a.cache), + IntentionUpstreamsDestination: proxycfgglue.CacheIntentionUpstreamsDestination(a.cache), InternalServiceDump: proxycfgglue.CacheInternalServiceDump(a.cache), LeafCertificate: proxycfgglue.CacheLeafCertificate(a.cache), PeeredUpstreams: proxycfgglue.CachePeeredUpstreams(a.cache), diff --git a/agent/cache-types/service_gateways.go b/agent/cache-types/service_gateways.go new file mode 100644 index 000000000..1c7a8e855 --- /dev/null +++ b/agent/cache-types/service_gateways.go @@ -0,0 +1,52 @@ +package cachetype + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" +) + +// Recommended name for registration. +const ServiceGatewaysName = "service-gateways" + +// GatewayUpstreams supports fetching upstreams for a given gateway name. +type ServiceGateways struct { + RegisterOptionsBlockingRefresh + RPC RPC +} + +func (g *ServiceGateways) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { + var result cache.FetchResult + + // The request should be a ServiceSpecificRequest. + reqReal, ok := req.(*structs.ServiceSpecificRequest) + if !ok { + return result, fmt.Errorf( + "Internal cache failure: request wrong type: %T", req) + } + + // Lightweight copy this object so that manipulating QueryOptions doesn't race. + dup := *reqReal + reqReal = &dup + + // Set the minimum query index to our current index so we block + reqReal.QueryOptions.MinQueryIndex = opts.MinIndex + reqReal.QueryOptions.MaxQueryTime = opts.Timeout + + // Always allow stale - there's no point in hitting leader if the request is + // going to be served from cache and end up arbitrarily stale anyway. This + // allows cached service-discover to automatically read scale across all + // servers too. + reqReal.AllowStale = true + + // Fetch + var reply structs.IndexedCheckServiceNodes + if err := g.RPC.RPC("Internal.ServiceGateways", reqReal, &reply); err != nil { + return result, err + } + + result.Value = &reply + result.Index = reply.QueryMeta.Index + return result, nil +} diff --git a/agent/cache-types/service_gateways_test.go b/agent/cache-types/service_gateways_test.go new file mode 100644 index 000000000..39c6b474d --- /dev/null +++ b/agent/cache-types/service_gateways_test.go @@ -0,0 +1,57 @@ +package cachetype + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestServiceGateways(t *testing.T) { + rpc := TestRPC(t) + typ := &ServiceGateways{RPC: rpc} + + // Expect the proper RPC call. This also sets the expected value + // since that is return-by-pointer in the arguments. 
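	// The mock below writes into the reply argument and captures that pointer in
	// resp, so the assertions further down can check that Fetch surfaces the very
	// same *structs.IndexedCheckServiceNodes, together with the index the mock
	// set, as the cache FetchResult.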
+ var resp *structs.IndexedCheckServiceNodes + rpc.On("RPC", "Internal.ServiceGateways", mock.Anything, mock.Anything).Return(nil). + Run(func(args mock.Arguments) { + req := args.Get(1).(*structs.ServiceSpecificRequest) + require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex) + require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime) + require.True(t, req.AllowStale) + require.Equal(t, "foo", req.ServiceName) + + nodes := []structs.CheckServiceNode{ + { + Service: &structs.NodeService{ + Tags: req.ServiceTags, + }, + }, + } + + reply := args.Get(2).(*structs.IndexedCheckServiceNodes) + reply.Nodes = nodes + reply.QueryMeta.Index = 48 + resp = reply + }) + + // Fetch + resultA, err := typ.Fetch(cache.FetchOptions{ + MinIndex: 24, + Timeout: 1 * time.Second, + }, &structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + }) + require.NoError(t, err) + require.Equal(t, cache.FetchResult{ + Value: resp, + Index: 48, + }, resultA) + + rpc.AssertExpectations(t) +} diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index a041c7eeb..8f44c0f7a 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -453,6 +453,56 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl return err } +// ServiceGateways returns all the nodes for services associated with a gateway along with their gateway config +func (m *Internal) ServiceGateways(args *structs.ServiceSpecificRequest, reply *structs.IndexedCheckServiceNodes) error { + if done, err := m.srv.ForwardRPC("Internal.ServiceGateways", args, reply); done { + return err + } + + // Verify the arguments + if args.ServiceName == "" { + return fmt.Errorf("Must provide gateway name") + } + + var authzContext acl.AuthorizerContext + authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext) + if err != nil { + return err + } + + if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + + // We need read access to the service we're trying to find gateways for, so check that first. + if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.ServiceName, &authzContext); err != nil { + return err + } + + err = m.srv.blockingQuery( + &args.QueryOptions, + &reply.QueryMeta, + func(ws memdb.WatchSet, state *state.Store) error { + var maxIdx uint64 + idx, gateways, err := state.ServiceGateways(ws, args.ServiceName, args.ServiceKind, args.EnterpriseMeta) + if err != nil { + return err + } + if idx > maxIdx { + maxIdx = idx + } + + reply.Index, reply.Nodes = maxIdx, gateways + + if err := m.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil + }) + + return err +} + // GatewayIntentions Match returns the set of intentions that match the given source/destination. 
func (m *Internal) GatewayIntentions(args *structs.IntentionQueryRequest, reply *structs.IndexedIntentions) error { // Forward if necessary diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index 7d7d421c8..f02150b8c 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -2811,3 +2811,479 @@ func TestInternal_PeeredUpstreams(t *testing.T) { } require.Equal(t, expect, out.Services) } + +func TestInternal_ServiceGatewayService_Terminating(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + + db := structs.NodeService{ + ID: "db2", + Service: "db", + } + + redis := structs.NodeService{ + ID: "redis", + Service: "redis", + } + + // Register gateway and two service instances that will be associated with it + { + arg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "10.1.2.2", + Service: &structs.NodeService{ + ID: "terminating-gateway-01", + Service: "terminating-gateway", + Kind: structs.ServiceKindTerminatingGateway, + Port: 443, + Address: "198.18.1.3", + }, + Check: &structs.HealthCheck{ + Name: "terminating connect", + Status: api.HealthPassing, + ServiceID: "terminating-gateway-01", + }, + } + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + + arg = structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + Service: &structs.NodeService{ + ID: "db", + Service: "db", + }, + Check: &structs.HealthCheck{ + Name: "db-warning", + Status: api.HealthWarning, + ServiceID: "db", + }, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + + arg = structs.RegisterRequest{ + Datacenter: "dc1", + Node: "baz", + Address: "127.0.0.3", + Service: &db, + Check: &structs.HealthCheck{ + Name: "db2-passing", + Status: api.HealthPassing, + ServiceID: "db2", + }, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + } + + // Register terminating-gateway config entry, linking it to db and redis (dne) + { + args := &structs.TerminatingGatewayConfigEntry{ + Name: "terminating-gateway", + Kind: structs.TerminatingGateway, + Services: []structs.LinkedService{ + { + Name: "db", + }, + { + Name: "redis", + CAFile: "/etc/certs/ca.pem", + CertFile: "/etc/certs/cert.pem", + KeyFile: "/etc/certs/key.pem", + }, + }, + } + + req := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: args, + } + var configOutput bool + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &configOutput)) + require.True(t, configOutput) + } + + var out structs.IndexedCheckServiceNodes + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceKind: structs.ServiceKindTerminatingGateway, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out)) + + for _, n := range out.Nodes { + n.Node.RaftIndex = structs.RaftIndex{} + n.Service.RaftIndex = structs.RaftIndex{} + for _, m := range n.Checks { + m.RaftIndex = structs.RaftIndex{} + } + } + + expect := structs.CheckServiceNodes{ + structs.CheckServiceNode{ + Node: &structs.Node{ + Node: "foo", + RaftIndex: structs.RaftIndex{}, + Address: "10.1.2.2", + Datacenter: "dc1", + 
Partition: acl.DefaultPartitionName, + }, + Service: &structs.NodeService{ + Kind: structs.ServiceKindTerminatingGateway, + ID: "terminating-gateway-01", + Service: "terminating-gateway", + TaggedAddresses: map[string]structs.ServiceAddress{ + "consul-virtual:" + db.CompoundServiceName().String(): {Address: "240.0.0.1"}, + "consul-virtual:" + redis.CompoundServiceName().String(): {Address: "240.0.0.2"}, + }, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + Tags: []string{}, + Meta: map[string]string{}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + RaftIndex: structs.RaftIndex{}, + Address: "198.18.1.3", + }, + Checks: structs.HealthChecks{ + &structs.HealthCheck{ + Name: "terminating connect", + Node: "foo", + CheckID: "terminating connect", + Status: api.HealthPassing, + ServiceID: "terminating-gateway-01", + ServiceName: "terminating-gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + }, + }, + } + + assert.Equal(t, expect, out.Nodes) +} + +func TestInternal_ServiceGatewayService_Terminating_ACL(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) + + // Create the ACL. + token, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", ` + service "db" { policy = "read" } + service "terminating-gateway" { policy = "read" } + node_prefix "" { policy = "read" }`) + require.NoError(t, err) + + // Register gateway and two service instances that will be associated with it + { + arg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "terminating-gateway", + Service: "terminating-gateway", + Kind: structs.ServiceKindTerminatingGateway, + Port: 443, + }, + Check: &structs.HealthCheck{ + Name: "terminating connect", + Status: api.HealthPassing, + ServiceID: "terminating-gateway", + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + { + arg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "terminating-gateway2", + Service: "terminating-gateway2", + Kind: structs.ServiceKindTerminatingGateway, + Port: 444, + }, + Check: &structs.HealthCheck{ + Name: "terminating connect", + Status: api.HealthPassing, + ServiceID: "terminating-gateway2", + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + } + + arg = structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + Service: &structs.NodeService{ + ID: "db", + Service: "db", + }, + Check: &structs.HealthCheck{ + Name: "db-warning", + Status: api.HealthWarning, + ServiceID: "db", + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + + arg = structs.RegisterRequest{ + Datacenter: "dc1", + Node: "baz", + Address: "127.0.0.3", + Service: &structs.NodeService{ + ID: 
"api", + Service: "api", + }, + Check: &structs.HealthCheck{ + Name: "api-passing", + Status: api.HealthPassing, + ServiceID: "api", + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + } + + // Register terminating-gateway config entry, linking it to db and api + { + args := &structs.TerminatingGatewayConfigEntry{ + Name: "terminating-gateway", + Kind: structs.TerminatingGateway, + Services: []structs.LinkedService{ + {Name: "db"}, + {Name: "api"}, + }, + } + + req := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: args, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var out bool + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out)) + require.True(t, out) + } + + // Register terminating-gateway config entry, linking it to db and api + { + args := &structs.TerminatingGatewayConfigEntry{ + Name: "terminating-gateway2", + Kind: structs.TerminatingGateway, + Services: []structs.LinkedService{ + {Name: "db"}, + {Name: "api"}, + }, + } + + req := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: args, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var out bool + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out)) + require.True(t, out) + } + + var out structs.IndexedCheckServiceNodes + + // Not passing a token with service:read on Gateway leads to PermissionDenied + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceKind: structs.ServiceKindTerminatingGateway, + } + err = msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out) + require.Error(t, err, acl.ErrPermissionDenied) + + // Passing a token without service:read on api leads to it getting filtered out + req = structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceKind: structs.ServiceKindTerminatingGateway, + QueryOptions: structs.QueryOptions{Token: token.SecretID}, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out)) + + nodes := out.Nodes + require.Len(t, nodes, 1) + require.Equal(t, "foo", nodes[0].Node.Node) + require.Equal(t, structs.ServiceKindTerminatingGateway, nodes[0].Service.Kind) + require.Equal(t, "terminating-gateway", nodes[0].Service.Service) + require.Equal(t, "terminating-gateway", nodes[0].Service.ID) + require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") +} + +func TestInternal_ServiceGatewayService_Terminating_Destination(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + + google := structs.NodeService{ + ID: "google", + Service: "google", + } + + // Register service-default with conflicting destination address + { + arg := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: &structs.ServiceConfigEntry{ + Name: "google", + Destination: &structs.DestinationConfig{Address: "www.google.com", Port: 443}, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + }, + } + var configOutput bool + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &arg, &configOutput)) + require.True(t, configOutput) + } + + // Register 
terminating-gateway config entry, linking it to google.com + { + arg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "terminating-gateway", + Service: "terminating-gateway", + Kind: structs.ServiceKindTerminatingGateway, + Port: 443, + }, + Check: &structs.HealthCheck{ + Name: "terminating connect", + Status: api.HealthPassing, + ServiceID: "terminating-gateway", + }, + } + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + } + { + args := &structs.TerminatingGatewayConfigEntry{ + Name: "terminating-gateway", + Kind: structs.TerminatingGateway, + Services: []structs.LinkedService{ + { + Name: "google", + }, + }, + } + + req := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: args, + } + var configOutput bool + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &configOutput)) + require.True(t, configOutput) + } + + var out structs.IndexedCheckServiceNodes + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "google", + ServiceKind: structs.ServiceKindTerminatingGateway, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out)) + + nodes := out.Nodes + + for _, n := range nodes { + n.Node.RaftIndex = structs.RaftIndex{} + n.Service.RaftIndex = structs.RaftIndex{} + for _, m := range n.Checks { + m.RaftIndex = structs.RaftIndex{} + } + } + + expect := structs.CheckServiceNodes{ + structs.CheckServiceNode{ + Node: &structs.Node{ + Node: "foo", + RaftIndex: structs.RaftIndex{}, + Address: "127.0.0.1", + Datacenter: "dc1", + Partition: acl.DefaultPartitionName, + }, + Service: &structs.NodeService{ + Kind: structs.ServiceKindTerminatingGateway, + ID: "terminating-gateway", + Service: "terminating-gateway", + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + Tags: []string{}, + Meta: map[string]string{}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + TaggedAddresses: map[string]structs.ServiceAddress{ + "consul-virtual:" + google.CompoundServiceName().String(): {Address: "240.0.0.1"}, + }, + RaftIndex: structs.RaftIndex{}, + Address: "", + }, + Checks: structs.HealthChecks{ + &structs.HealthCheck{ + Name: "terminating connect", + Node: "foo", + CheckID: "terminating connect", + Status: api.HealthPassing, + ServiceID: "terminating-gateway", + ServiceName: "terminating-gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + }, + }, + } + + assert.Len(t, nodes, 1) + assert.Equal(t, expect, nodes) +} diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 2f116ab0f..622cccd35 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -2907,6 +2907,25 @@ func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl. return lib.MaxUint64(maxIdx, idx), results, nil } +// TODO: Find a way to consolidate this with CheckIngressServiceNodes +// ServiceGateways is used to query all gateways associated with a service +func (s *Store) ServiceGateways(ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // tableGatewayServices is not peer-aware, and the existence of TG/IG gateways is scrubbed during peer replication. 
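	// Rough shape of what follows: serviceGatewayNodes resolves the reverse
	// mapping (which gateways of the given kind are configured for this service),
	// maxIndexAndWatchChsForServiceNodes adds watch channels so blocking queries
	// wake on changes to those gateway registrations, and parseCheckServiceNodes
	// hydrates node and health-check details. A caller such as the
	// Internal.ServiceGateways RPC added in this change uses it inside a blocking
	// query roughly like:
	//
	//	idx, gateways, err := store.ServiceGateways(ws, args.ServiceName, args.ServiceKind, args.EnterpriseMeta)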
+ maxIdx, nodes, err := serviceGatewayNodes(tx, ws, service, kind, &entMeta, structs.DefaultPeerKeyword) + + // Watch for index changes to the gateway nodes + idx, chans := maxIndexAndWatchChsForServiceNodes(tx, nodes, false) + for _, ch := range chans { + ws.Add(ch) + } + maxIdx = lib.MaxUint64(maxIdx, idx) + + return parseCheckServiceNodes(tx, ws, maxIdx, nodes, &entMeta, structs.DefaultPeerKeyword, err) +} + func (s *Store) VirtualIPForService(psn structs.PeeredServiceName) (string, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -3862,7 +3881,7 @@ func (s *Store) collectGatewayServices(tx ReadTxn, ws memdb.WatchSet, iter memdb return maxIdx, results, nil } -// TODO(ingress): How to handle index rolling back when a config entry is +// TODO: How to handle index rolling back when a config entry is // deleted that references a service? // We might need something like the service_last_extinction index? func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index d2a970b07..fed3bd0ee 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -4,6 +4,7 @@ import ( "context" crand "crypto/rand" "fmt" + "github.com/hashicorp/consul/acl" "reflect" "sort" "strings" @@ -5346,6 +5347,400 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) { assert.Len(t, out, 0) } +func TestStateStore_ServiceGateways_Terminating(t *testing.T) { + s := testStateStore(t) + + // Listing with no results returns an empty list. + ws := memdb.NewWatchSet() + idx, nodes, err := s.GatewayServices(ws, "db", nil) + assert.Nil(t, err) + assert.Equal(t, uint64(0), idx) + assert.Len(t, nodes, 0) + + // Create some nodes + assert.Nil(t, s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + assert.Nil(t, s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + assert.Nil(t, s.EnsureNode(12, &structs.Node{Node: "baz", Address: "127.0.0.2"})) + + // Typical services and some consul services spread across two nodes + assert.Nil(t, s.EnsureService(13, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) + assert.Nil(t, s.EnsureService(15, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) + assert.Nil(t, s.EnsureService(16, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + assert.Nil(t, s.EnsureService(17, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + + // Add ingress gateway and a connect proxy, neither should get picked up by terminating gateway + ingressNS := &structs.NodeService{ + Kind: structs.ServiceKindIngressGateway, + ID: "ingress", + Service: "ingress", + Port: 8443, + } + assert.Nil(t, s.EnsureService(18, "baz", ingressNS)) + + proxyNS := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "db proxy", + Service: "db proxy", + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "db", + }, + Port: 8000, + } + assert.Nil(t, s.EnsureService(19, "foo", proxyNS)) + + // Register a gateway + assert.Nil(t, s.EnsureService(20, "baz", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443})) + + // Associate gateway with db and api + assert.Nil(t, s.EnsureConfigEntry(21, &structs.TerminatingGatewayConfigEntry{ + Kind: 
"terminating-gateway", + Name: "gateway", + Services: []structs.LinkedService{ + { + Name: "db", + }, + { + Name: "api", + }, + }, + })) + assert.True(t, watchFired(ws)) + + // Read everything back. + ws = memdb.NewWatchSet() + idx, out, err := s.ServiceGateways(ws, "db", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(21), idx) + assert.Len(t, out, 1) + + expect := structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + } + assert.Equal(t, expect, out) + + // Check that we don't update on same exact config + assert.Nil(t, s.EnsureConfigEntry(21, &structs.TerminatingGatewayConfigEntry{ + Kind: "terminating-gateway", + Name: "gateway", + Services: []structs.LinkedService{ + { + Name: "db", + }, + { + Name: "api", + }, + }, + })) + assert.False(t, watchFired(ws)) + + idx, out, err = s.ServiceGateways(ws, "api", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(21), idx) + assert.Len(t, out, 1) + + expect = structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + } + assert.Equal(t, expect, out) + + // Associate gateway with a wildcard and add TLS config + assert.Nil(t, s.EnsureConfigEntry(22, &structs.TerminatingGatewayConfigEntry{ + Kind: "terminating-gateway", + Name: "gateway", + Services: []structs.LinkedService{ + { + Name: "api", + CAFile: "api/ca.crt", + CertFile: "api/client.crt", + KeyFile: "api/client.key", + SNI: "my-domain", + }, + { + Name: "db", + }, + { + Name: "*", + CAFile: "ca.crt", + CertFile: "client.crt", + KeyFile: "client.key", + SNI: "my-alt-domain", + }, + }, + })) + assert.True(t, watchFired(ws)) + + // Read everything back. 
+ ws = memdb.NewWatchSet() + idx, out, err = s.ServiceGateways(ws, "db", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(22), idx) + assert.Len(t, out, 1) + + expect = structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + } + assert.Equal(t, expect, out) + + // Add a service covered by wildcard + assert.Nil(t, s.EnsureService(23, "bar", &structs.NodeService{ID: "redis", Service: "redis", Tags: nil, Address: "", Port: 6379})) + + ws = memdb.NewWatchSet() + idx, out, err = s.ServiceGateways(ws, "redis", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(23), idx) + assert.Len(t, out, 1) + + expect = structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + } + assert.Equal(t, expect, out) + + // Delete a service covered by wildcard + assert.Nil(t, s.DeleteService(24, "bar", "redis", structs.DefaultEnterpriseMetaInDefaultPartition(), "")) + assert.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + idx, out, err = s.ServiceGateways(ws, "redis", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + // TODO: wildcards don't keep the same extinction index + assert.Equal(t, uint64(0), idx) + assert.Len(t, out, 0) + + // Update the entry that only leaves one service + assert.Nil(t, s.EnsureConfigEntry(25, &structs.TerminatingGatewayConfigEntry{ + Kind: "terminating-gateway", + Name: "gateway", + Services: []structs.LinkedService{ + { + Name: "db", + }, + }, + })) + assert.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + idx, out, err = s.ServiceGateways(ws, "db", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(25), idx) + assert.Len(t, out, 1) + + // previously associated services should not be present + expect = structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + } + 
assert.Equal(t, expect, out) + + // Attempt to associate a different gateway with services that include db + assert.Nil(t, s.EnsureConfigEntry(26, &structs.TerminatingGatewayConfigEntry{ + Kind: "terminating-gateway", + Name: "gateway2", + Services: []structs.LinkedService{ + { + Name: "*", + }, + }, + })) + + // check that watchset fired for new terminating gateway node service + assert.Nil(t, s.EnsureService(20, "baz", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway2", Service: "gateway2", Port: 443})) + assert.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + idx, out, err = s.ServiceGateways(ws, "db", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(26), idx) + assert.Len(t, out, 2) + + expect = structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + { + Node: &structs.Node{ + ID: "", + Address: "127.0.0.2", + Node: "baz", + Partition: acl.DefaultPartitionName, + RaftIndex: structs.RaftIndex{ + CreateIndex: 12, + ModifyIndex: 12, + }, + }, + Service: &structs.NodeService{ + Service: "gateway2", + Kind: structs.ServiceKindTerminatingGateway, + ID: "gateway2", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Weights: &structs.Weights{Passing: 1, Warning: 1}, + Port: 443, + RaftIndex: structs.RaftIndex{ + CreateIndex: 20, + ModifyIndex: 20, + }, + }, + }, + } + assert.Equal(t, expect, out) + + // Deleting the all gateway's node services should trigger the watch and keep the raft index stable + assert.Nil(t, s.DeleteService(27, "baz", "gateway", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)) + assert.True(t, watchFired(ws)) + assert.Nil(t, s.DeleteService(28, "baz", "gateway2", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)) + + ws = memdb.NewWatchSet() + idx, out, err = s.ServiceGateways(ws, "db", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + assert.Equal(t, uint64(28), idx) + assert.Len(t, out, 0) + + // Deleting the config entry even with a node service should remove existing mappings + assert.Nil(t, s.EnsureService(29, "baz", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443})) + assert.Nil(t, s.DeleteConfigEntry(30, "terminating-gateway", "gateway", nil)) + assert.True(t, watchFired(ws)) + + idx, out, err = s.ServiceGateways(ws, "api", structs.ServiceKindTerminatingGateway, *structs.DefaultEnterpriseMetaInDefaultPartition()) + assert.Nil(t, err) + // TODO: similar to ingress, the index can backslide if the config is deleted. 
+ assert.Equal(t, uint64(28), idx) + assert.Len(t, out, 0) +} + func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) { s := testStateStore(t) diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 06798939b..7c3311f36 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -54,6 +54,12 @@ func CacheDatacenters(c *cache.Cache) proxycfg.Datacenters { return &cacheProxyDataSource[*structs.DatacentersRequest]{c, cachetype.CatalogDatacentersName} } +// CacheServiceGateways satisfies the proxycfg.ServiceGateways interface by +// sourcing data from the agent cache. +func CacheServiceGateways(c *cache.Cache) proxycfg.GatewayServices { + return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.ServiceGatewaysName} +} + // CacheHTTPChecks satisifies the proxycfg.HTTPChecks interface by sourcing // data from the agent cache. func CacheHTTPChecks(c *cache.Cache) proxycfg.HTTPChecks { @@ -66,6 +72,12 @@ func CacheIntentionUpstreams(c *cache.Cache) proxycfg.IntentionUpstreams { return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsName} } +// CacheIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreamsDestination interface +// by sourcing data from the agent cache. +func CacheIntentionUpstreamsDestination(c *cache.Cache) proxycfg.IntentionUpstreams { + return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsDestinationName} +} + // CacheInternalServiceDump satisfies the proxycfg.InternalServiceDump // interface by sourcing data from the agent cache. func CacheInternalServiceDump(c *cache.Cache) proxycfg.InternalServiceDump { diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 3221150db..823f7d9ef 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -28,10 +28,12 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e snap.ConnectProxy.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes) snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType) snap.ConnectProxy.PreparedQueryEndpoints = make(map[UpstreamID]structs.CheckServiceNodes) + snap.ConnectProxy.DestinationsUpstream = watch.NewMap[UpstreamID, *structs.ServiceConfigEntry]() snap.ConnectProxy.UpstreamConfig = make(map[UpstreamID]*structs.Upstream) snap.ConnectProxy.PassthroughUpstreams = make(map[UpstreamID]map[string]map[string]struct{}) snap.ConnectProxy.PassthroughIndices = make(map[string]indexedTarget) snap.ConnectProxy.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]() + snap.ConnectProxy.DestinationGateways = watch.NewMap[UpstreamID, structs.CheckServiceNodes]() snap.ConnectProxy.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{}) // Watch for root changes @@ -116,6 +118,16 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e if err != nil { return snap, err } + // We also infer upstreams from destinations (egress points) + err = s.dataSources.IntentionUpstreamsDestination.Notify(ctx, &structs.ServiceSpecificRequest{ + Datacenter: s.source.Datacenter, + QueryOptions: structs.QueryOptions{Token: s.token}, + ServiceName: s.proxyCfg.DestinationServiceName, + EnterpriseMeta: s.proxyID.EnterpriseMeta, + }, intentionUpstreamsDestinationID, s.ch) + if err != nil { + return snap, err + } } // Watch for updates to service endpoints for all upstreams @@ -508,7 
+520,83 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s delete(snap.ConnectProxy.DiscoveryChain, uid) } } + case u.CorrelationID == intentionUpstreamsDestinationID: + resp, ok := u.Result.(*structs.IndexedServiceList) + if !ok { + return fmt.Errorf("invalid type for response %T", u.Result) + } + seenUpstreams := make(map[UpstreamID]struct{}) + for _, svc := range resp.Services { + uid := NewUpstreamIDFromServiceName(svc) + seenUpstreams[uid] = struct{}{} + { + childCtx, cancel := context.WithCancel(ctx) + err := s.dataSources.ConfigEntry.Notify(childCtx, &structs.ConfigEntryQuery{ + Kind: structs.ServiceDefaults, + Name: svc.Name, + Datacenter: s.source.Datacenter, + QueryOptions: structs.QueryOptions{Token: s.token}, + EnterpriseMeta: svc.EnterpriseMeta, + }, DestinationConfigEntryID+svc.String(), s.ch) + if err != nil { + cancel() + return err + } + snap.ConnectProxy.DestinationsUpstream.InitWatch(uid, cancel) + } + { + childCtx, cancel := context.WithCancel(ctx) + err := s.dataSources.ServiceGateways.Notify(childCtx, &structs.ServiceSpecificRequest{ + ServiceName: svc.Name, + Datacenter: s.source.Datacenter, + QueryOptions: structs.QueryOptions{Token: s.token}, + EnterpriseMeta: svc.EnterpriseMeta, + ServiceKind: structs.ServiceKindTerminatingGateway, + }, DestinationGatewayID+svc.String(), s.ch) + if err != nil { + cancel() + return err + } + snap.ConnectProxy.DestinationGateways.InitWatch(uid, cancel) + } + } + snap.ConnectProxy.DestinationsUpstream.ForEachKey(func(uid UpstreamID) bool { + if _, ok := seenUpstreams[uid]; !ok { + snap.ConnectProxy.DestinationsUpstream.CancelWatch(uid) + } + return true + }) + + snap.ConnectProxy.DestinationGateways.ForEachKey(func(uid UpstreamID) bool { + if _, ok := seenUpstreams[uid]; !ok { + snap.ConnectProxy.DestinationGateways.CancelWatch(uid) + } + return true + }) + case strings.HasPrefix(u.CorrelationID, DestinationConfigEntryID): + resp, ok := u.Result.(*structs.ConfigEntryResponse) + if !ok { + return fmt.Errorf("invalid type for response: %T", u.Result) + } + + pq := strings.TrimPrefix(u.CorrelationID, DestinationConfigEntryID) + uid := UpstreamIDFromString(pq) + serviceConf, ok := resp.Entry.(*structs.ServiceConfigEntry) + if !ok { + return fmt.Errorf("invalid type for service default: %T", resp.Entry.GetName()) + } + + snap.ConnectProxy.DestinationsUpstream.Set(uid, serviceConf) + case strings.HasPrefix(u.CorrelationID, DestinationGatewayID): + resp, ok := u.Result.(*structs.IndexedCheckServiceNodes) + if !ok { + return fmt.Errorf("invalid type for response: %T", u.Result) + } + + pq := strings.TrimPrefix(u.CorrelationID, DestinationGatewayID) + uid := UpstreamIDFromString(pq) + snap.ConnectProxy.DestinationGateways.Set(uid, resp.Nodes) case strings.HasPrefix(u.CorrelationID, "upstream:"+preparedQueryIDPrefix): resp, ok := u.Result.(*structs.PreparedQueryExecuteResponse) if !ok { diff --git a/agent/proxycfg/data_sources.go b/agent/proxycfg/data_sources.go index 310a4340e..3bef5e347 100644 --- a/agent/proxycfg/data_sources.go +++ b/agent/proxycfg/data_sources.go @@ -47,6 +47,10 @@ type DataSources struct { // notification channel. GatewayServices GatewayServices + // ServiceGateways provides updates about a gateway's upstream services on a + // notification channel. + ServiceGateways ServiceGateways + // Health provides service health updates on a notification channel. Health Health @@ -61,6 +65,10 @@ type DataSources struct { // notification channel. 
IntentionUpstreams IntentionUpstreams + // IntentionUpstreamsDestination provides intention-inferred upstream updates on a + // notification channel. + IntentionUpstreamsDestination IntentionUpstreamsDestination + // InternalServiceDump provides updates about a (gateway) service on a // notification channel. InternalServiceDump InternalServiceDump @@ -115,7 +123,7 @@ type ConfigEntry interface { Notify(ctx context.Context, req *structs.ConfigEntryQuery, correlationID string, ch chan<- UpdateEvent) error } -// ConfigEntry is the interface used to consume updates about a list of config +// ConfigEntryList is the interface used to consume updates about a list of config // entries. type ConfigEntryList interface { Notify(ctx context.Context, req *structs.ConfigEntryQuery, correlationID string, ch chan<- UpdateEvent) error @@ -139,6 +147,11 @@ type GatewayServices interface { Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error } +// ServiceGateways is the interface used to consume updates about a service terminating gateways +type ServiceGateways interface { + Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error +} + // Health is the interface used to consume service health updates. type Health interface { Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error @@ -162,6 +175,12 @@ type IntentionUpstreams interface { Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error } +// IntentionUpstreamsDestination is the interface used to consume updates about upstreams destination +// inferred from service intentions. +type IntentionUpstreamsDestination interface { + Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error +} + // InternalServiceDump is the interface used to consume updates about a (gateway) // service via the internal ServiceDump RPC. type InternalServiceDump interface { diff --git a/agent/proxycfg/internal/watch/watchmap.go b/agent/proxycfg/internal/watch/watchmap.go index bbf42dc9a..ec676bb8f 100644 --- a/agent/proxycfg/internal/watch/watchmap.go +++ b/agent/proxycfg/internal/watch/watchmap.go @@ -106,3 +106,18 @@ func (m Map[K, V]) ForEachKey(f func(K) bool) { } } } + +// ForEachKeyE iterates through the map, calling f +// for each iteration. It is up to the caller to +// Get the value and nil-check if required. +// If a non-nil error is returned by f, iterating +// stops and the error is returned. +// Order of iteration is non-deterministic. 
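+//
+// A minimal usage sketch (names here are illustrative only): stop iterating on
+// the first missing value.
+//
+//	err := m.ForEachKeyE(func(k K) error {
+//		if _, ok := m.Get(k); !ok {
+//			return fmt.Errorf("no value for key %v", k)
+//		}
+//		return nil
+//	})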
+func (m Map[K, V]) ForEachKeyE(f func(K) error) error { + for k := range m.M { + if err := f(k); err != nil { + return err + } + } + return nil +} diff --git a/agent/proxycfg/internal/watch/watchmap_test.go b/agent/proxycfg/internal/watch/watchmap_test.go index 590351853..deb7cea08 100644 --- a/agent/proxycfg/internal/watch/watchmap_test.go +++ b/agent/proxycfg/internal/watch/watchmap_test.go @@ -1,6 +1,7 @@ package watch import ( + "errors" "testing" "github.com/stretchr/testify/require" @@ -111,3 +112,43 @@ func TestMap_ForEach(t *testing.T) { require.Equal(t, 1, count) } } + +func TestMap_ForEachE(t *testing.T) { + type testType struct { + s string + } + + m := NewMap[string, any]() + inputs := map[string]any{ + "hello": 13, + "foo": struct{}{}, + "bar": &testType{s: "wow"}, + } + for k, v := range inputs { + m.InitWatch(k, nil) + m.Set(k, v) + } + require.Equal(t, 3, m.Len()) + + // returning nil error continues iteration + { + var count int + err := m.ForEachKeyE(func(k string) error { + count++ + return nil + }) + require.Equal(t, 3, count) + require.Nil(t, err) + } + + // returning an error should exit immediately + { + var count int + err := m.ForEachKeyE(func(k string) error { + count++ + return errors.New("boooo") + }) + require.Equal(t, 1, count) + require.Errorf(t, err, "boo") + } +} diff --git a/agent/proxycfg/manager_test.go b/agent/proxycfg/manager_test.go index 184b62148..2a3cdd15f 100644 --- a/agent/proxycfg/manager_test.go +++ b/agent/proxycfg/manager_test.go @@ -236,6 +236,8 @@ func TestManager_BasicLifecycle(t *testing.T) { PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{}, }, PreparedQueryEndpoints: map[UpstreamID]structs.CheckServiceNodes{}, + DestinationsUpstream: watch.NewMap[UpstreamID, *structs.ServiceConfigEntry](), + DestinationGateways: watch.NewMap[UpstreamID, structs.CheckServiceNodes](), WatchedServiceChecks: map[structs.ServiceID][]structs.CheckType{}, Intentions: TestIntentions(), IntentionsSet: true, @@ -297,6 +299,8 @@ func TestManager_BasicLifecycle(t *testing.T) { PeerUpstreamEndpointsUseHostnames: map[UpstreamID]struct{}{}, }, PreparedQueryEndpoints: map[UpstreamID]structs.CheckServiceNodes{}, + DestinationsUpstream: watch.NewMap[UpstreamID, *structs.ServiceConfigEntry](), + DestinationGateways: watch.NewMap[UpstreamID, structs.CheckServiceNodes](), WatchedServiceChecks: map[structs.ServiceID][]structs.CheckType{}, Intentions: TestIntentions(), IntentionsSet: true, diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index 6a02aad1e..b04c67c26 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -142,6 +142,9 @@ type configSnapshotConnectProxy struct { // intentions. 
Intentions structs.Intentions IntentionsSet bool + + DestinationsUpstream watch.Map[UpstreamID, *structs.ServiceConfigEntry] + DestinationGateways watch.Map[UpstreamID, structs.CheckServiceNodes] } // isEmpty is a test helper @@ -163,6 +166,8 @@ func (c *configSnapshotConnectProxy) isEmpty() bool { len(c.UpstreamConfig) == 0 && len(c.PassthroughUpstreams) == 0 && len(c.IntentionUpstreams) == 0 && + c.DestinationGateways.Len() == 0 && + c.DestinationsUpstream.Len() == 0 && len(c.PeeredUpstreams) == 0 && !c.InboundPeerTrustBundlesSet && !c.MeshConfigSet && diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index f9388cf48..13b22c4fd 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -37,9 +37,12 @@ const ( serviceIntentionsIDPrefix = "service-intentions:" intentionUpstreamsID = "intention-upstreams" peeredUpstreamsID = "peered-upstreams" + intentionUpstreamsDestinationID = "intention-upstreams-destination" upstreamPeerWatchIDPrefix = "upstream-peer:" exportedServiceListWatchID = "exported-service-list" meshConfigEntryID = "mesh" + DestinationConfigEntryID = "destination:" + DestinationGatewayID = "dest-gateway:" svcChecksWatchIDPrefix = cachetype.ServiceHTTPChecksName + ":" preparedQueryIDPrefix = string(structs.UpstreamDestTypePreparedQuery) + ":" defaultPreparedQueryPollInterval = 30 * time.Second diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 36b641a69..662596b9b 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -125,10 +125,12 @@ func recordWatches(sc *stateConfig) *watchRecorder { Datacenters: typedWatchRecorder[*structs.DatacentersRequest]{wr}, FederationStateListMeshGateways: typedWatchRecorder[*structs.DCSpecificRequest]{wr}, GatewayServices: typedWatchRecorder[*structs.ServiceSpecificRequest]{wr}, + ServiceGateways: typedWatchRecorder[*structs.ServiceSpecificRequest]{wr}, Health: typedWatchRecorder[*structs.ServiceSpecificRequest]{wr}, HTTPChecks: typedWatchRecorder[*cachetype.ServiceHTTPChecksRequest]{wr}, Intentions: typedWatchRecorder[*structs.ServiceSpecificRequest]{wr}, IntentionUpstreams: typedWatchRecorder[*structs.ServiceSpecificRequest]{wr}, + IntentionUpstreamsDestination: typedWatchRecorder[*structs.ServiceSpecificRequest]{wr}, InternalServiceDump: typedWatchRecorder[*structs.ServiceDumpRequest]{wr}, LeafCertificate: typedWatchRecorder[*cachetype.ConnectCALeafRequest]{wr}, PeeredUpstreams: typedWatchRecorder[*structs.PartitionSpecificRequest]{wr}, @@ -1738,11 +1740,12 @@ func TestState_WatchesAndUpdates(t *testing.T) { stages: []verificationStage{ { requiredWatches: map[string]verifyWatchRequest{ - intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), - intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), - meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), - rootsWatchID: genVerifyDCSpecificWatch("dc1"), - leafWatchID: genVerifyLeafWatch("api", "dc1"), + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), + rootsWatchID: genVerifyDCSpecificWatch("dc1"), + leafWatchID: genVerifyLeafWatch("api", "dc1"), }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "proxy without roots/leaf/intentions is not valid") @@ -1823,11 +1826,12 @@ func TestState_WatchesAndUpdates(t *testing.T) { // 
Empty on initialization { requiredWatches: map[string]verifyWatchRequest{ - intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), - intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), - meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), - rootsWatchID: genVerifyDCSpecificWatch("dc1"), - leafWatchID: genVerifyLeafWatch("api", "dc1"), + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), + rootsWatchID: genVerifyDCSpecificWatch("dc1"), + leafWatchID: genVerifyLeafWatch("api", "dc1"), }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.False(t, snap.Valid(), "proxy without roots/leaf/intentions is not valid") @@ -1882,10 +1886,11 @@ func TestState_WatchesAndUpdates(t *testing.T) { // Receiving an intention should lead to spinning up a discovery chain watch { requiredWatches: map[string]verifyWatchRequest{ - intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), - intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), - rootsWatchID: genVerifyDCSpecificWatch("dc1"), - leafWatchID: genVerifyLeafWatch("api", "dc1"), + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + rootsWatchID: genVerifyDCSpecificWatch("dc1"), + leafWatchID: genVerifyLeafWatch("api", "dc1"), }, events: []UpdateEvent{ { @@ -2313,10 +2318,11 @@ func TestState_WatchesAndUpdates(t *testing.T) { { // Empty list of upstreams should clean up map keys requiredWatches: map[string]verifyWatchRequest{ - intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), - intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), - rootsWatchID: genVerifyDCSpecificWatch("dc1"), - leafWatchID: genVerifyLeafWatch("api", "dc1"), + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + rootsWatchID: genVerifyDCSpecificWatch("dc1"), + leafWatchID: genVerifyLeafWatch("api", "dc1"), }, events: []UpdateEvent{ { @@ -2344,6 +2350,169 @@ func TestState_WatchesAndUpdates(t *testing.T) { }, }, }, + "transparent-proxy-handle-update-destination": { + ns: structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "api-proxy", + Service: "api-proxy", + Address: "10.0.1.1", + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "api", + Mode: structs.ProxyModeTransparent, + Upstreams: structs.Upstreams{ + { + CentrallyConfigured: true, + DestinationName: structs.WildcardSpecifier, + DestinationNamespace: structs.WildcardSpecifier, + Config: map[string]interface{}{ + "connect_timeout_ms": 6000, + }, + MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote}, + }, + }, + }, + }, + sourceDC: "dc1", + stages: []verificationStage{ + // Empty on initialization + { + requiredWatches: map[string]verifyWatchRequest{ + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + 
meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), + rootsWatchID: genVerifyDCSpecificWatch("dc1"), + leafWatchID: genVerifyLeafWatch("api", "dc1"), + }, + verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { + require.False(t, snap.Valid(), "proxy without roots/leaf/intentions is not valid") + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) + + // Centrally configured upstream defaults should be stored so that upstreams from intentions can inherit them + require.Len(t, snap.ConnectProxy.UpstreamConfig, 1) + + wc := structs.NewServiceName(structs.WildcardSpecifier, structs.WildcardEnterpriseMetaInDefaultPartition()) + wcUID := NewUpstreamIDFromServiceName(wc) + require.Contains(t, snap.ConnectProxy.UpstreamConfig, wcUID) + }, + }, + // Valid snapshot after roots, leaf, and intentions + { + events: []UpdateEvent{ + rootWatchEvent(), + { + CorrelationID: leafWatchID, + Result: issuedCert, + Err: nil, + }, + { + CorrelationID: intentionsWatchID, + Result: TestIntentions(), + Err: nil, + }, + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TransparentProxy: structs.TransparentProxyMeshConfig{}, + }, + }, + Err: nil, + }, + }, + verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { + require.True(t, snap.Valid(), "proxy with roots/leaf/intentions is valid") + require.Equal(t, indexedRoots, snap.Roots) + require.Equal(t, issuedCert, snap.Leaf()) + require.Equal(t, TestIntentions(), snap.ConnectProxy.Intentions) + require.True(t, snap.MeshGateway.isEmpty()) + require.True(t, snap.IngressGateway.isEmpty()) + require.True(t, snap.TerminatingGateway.isEmpty()) + require.True(t, snap.ConnectProxy.MeshConfigSet) + require.NotNil(t, snap.ConnectProxy.MeshConfig) + }, + }, + // Receiving an intention should lead to spinning up a DestinationConfigEntryID + { + requiredWatches: map[string]verifyWatchRequest{ + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + rootsWatchID: genVerifyDCSpecificWatch("dc1"), + leafWatchID: genVerifyLeafWatch("api", "dc1"), + }, + events: []UpdateEvent{ + { + CorrelationID: intentionUpstreamsDestinationID, + Result: &structs.IndexedServiceList{ + Services: structs.ServiceList{ + db, + }, + }, + Err: nil, + }, + }, + verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { + require.True(t, snap.Valid(), "should still be valid") + + // Watches have a key allocated even if the value is not set + require.Equal(t, 1, snap.ConnectProxy.DestinationsUpstream.Len()) + }, + }, + // DestinationConfigEntryID updates should be stored + { + requiredWatches: map[string]verifyWatchRequest{ + DestinationConfigEntryID + dbUID.String(): genVerifyConfigEntryWatch(structs.ServiceDefaults, db.Name, "dc1"), + }, + events: []UpdateEvent{ + { + CorrelationID: DestinationConfigEntryID + dbUID.String(), + Result: &structs.ConfigEntryResponse{ + Entry: &structs.ServiceConfigEntry{Name: "db", Destination: &structs.DestinationConfig{}}, + }, + Err: nil, + }, + { + CorrelationID: DestinationGatewayID + dbUID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "foo", + Partition: api.PartitionOrDefault(), + Datacenter: "dc1", + }, + Service: &structs.NodeService{ + Service: 
"gtwy1", + TaggedAddresses: map[string]structs.ServiceAddress{ + structs.ServiceGatewayVirtualIPTag(structs.ServiceName{Name: "db", EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()}): {Address: "172.0.0.1", Port: 443}, + }, + }, + Checks: structs.HealthChecks{}, + }, + }, + }, + Err: nil, + }, + }, + verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { + require.True(t, snap.Valid(), "should still be valid") + require.Equal(t, 1, snap.ConnectProxy.DestinationsUpstream.Len()) + require.Equal(t, 1, snap.ConnectProxy.DestinationGateways.Len()) + snap.ConnectProxy.DestinationsUpstream.ForEachKey(func(uid UpstreamID) bool { + _, ok := snap.ConnectProxy.DestinationsUpstream.Get(uid) + require.True(t, ok) + return true + }) + dbDest, ok := snap.ConnectProxy.DestinationsUpstream.Get(dbUID) + require.True(t, ok) + require.Equal(t, structs.ServiceConfigEntry{Name: "db", Destination: &structs.DestinationConfig{}}, *dbDest) + }, + }, + }, + }, // Receiving an empty upstreams from Intentions list shouldn't delete explicit upstream watches "transparent-proxy-handle-update-explicit-cross-dc": { ns: structs.NodeService{ @@ -2379,9 +2548,10 @@ func TestState_WatchesAndUpdates(t *testing.T) { // Empty on initialization { requiredWatches: map[string]verifyWatchRequest{ - intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), - intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), - meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + meshConfigEntryID: genVerifyMeshConfigWatch("dc1"), "discovery-chain:" + upstreamIDForDC2(dbUID).String(): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{ Name: "db", EvaluateInDatacenter: "dc2", @@ -2479,8 +2649,9 @@ func TestState_WatchesAndUpdates(t *testing.T) { // be deleted from the snapshot. 
{ requiredWatches: map[string]verifyWatchRequest{ - intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), - intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionsWatchID: genVerifyIntentionWatch("api", "dc1"), + intentionUpstreamsID: genVerifyServiceSpecificRequest("api", "", "dc1", false), + intentionUpstreamsDestinationID: genVerifyServiceSpecificRequest("api", "", "dc1", false), "discovery-chain:" + upstreamIDForDC2(dbUID).String(): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{ Name: "db", EvaluateInDatacenter: "dc2", diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index 744c17e18..dfde519d1 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -739,10 +739,12 @@ func testConfigSnapshotFixture( Datacenters: &noopDataSource[*structs.DatacentersRequest]{}, FederationStateListMeshGateways: &noopDataSource[*structs.DCSpecificRequest]{}, GatewayServices: &noopDataSource[*structs.ServiceSpecificRequest]{}, + ServiceGateways: &noopDataSource[*structs.ServiceSpecificRequest]{}, Health: &noopDataSource[*structs.ServiceSpecificRequest]{}, HTTPChecks: &noopDataSource[*cachetype.ServiceHTTPChecksRequest]{}, Intentions: &noopDataSource[*structs.ServiceSpecificRequest]{}, IntentionUpstreams: &noopDataSource[*structs.ServiceSpecificRequest]{}, + IntentionUpstreamsDestination: &noopDataSource[*structs.ServiceSpecificRequest]{}, InternalServiceDump: &noopDataSource[*structs.ServiceDumpRequest]{}, LeafCertificate: &noopDataSource[*cachetype.ConnectCALeafRequest]{}, PeeredUpstreams: &noopDataSource[*structs.PartitionSpecificRequest]{}, @@ -946,6 +948,7 @@ func NewTestDataSources() *TestDataSources { HTTPChecks: NewTestDataSource[*cachetype.ServiceHTTPChecksRequest, []structs.CheckType](), Intentions: NewTestDataSource[*structs.ServiceSpecificRequest, structs.Intentions](), IntentionUpstreams: NewTestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList](), + IntentionUpstreamsDestination: NewTestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList](), InternalServiceDump: NewTestDataSource[*structs.ServiceDumpRequest, *structs.IndexedNodesWithGateways](), LeafCertificate: NewTestDataSource[*cachetype.ConnectCALeafRequest, *structs.IssuedCert](), PreparedQuery: NewTestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse](), @@ -966,10 +969,12 @@ type TestDataSources struct { FederationStateListMeshGateways *TestDataSource[*structs.DCSpecificRequest, *structs.DatacenterIndexedCheckServiceNodes] Datacenters *TestDataSource[*structs.DatacentersRequest, *[]string] GatewayServices *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedGatewayServices] + ServiceGateways *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceNodes] Health *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedCheckServiceNodes] HTTPChecks *TestDataSource[*cachetype.ServiceHTTPChecksRequest, []structs.CheckType] Intentions *TestDataSource[*structs.ServiceSpecificRequest, structs.Intentions] IntentionUpstreams *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList] + IntentionUpstreamsDestination *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList] InternalServiceDump *TestDataSource[*structs.ServiceDumpRequest, *structs.IndexedNodesWithGateways] LeafCertificate *TestDataSource[*cachetype.ConnectCALeafRequest, *structs.IssuedCert] PeeredUpstreams 
*TestDataSource[*structs.PartitionSpecificRequest, *structs.IndexedPeeredServiceList] @@ -984,24 +989,26 @@ type TestDataSources struct { func (t *TestDataSources) ToDataSources() DataSources { ds := DataSources{ - CARoots: t.CARoots, - CompiledDiscoveryChain: t.CompiledDiscoveryChain, - ConfigEntry: t.ConfigEntry, - ConfigEntryList: t.ConfigEntryList, - Datacenters: t.Datacenters, - GatewayServices: t.GatewayServices, - Health: t.Health, - HTTPChecks: t.HTTPChecks, - Intentions: t.Intentions, - IntentionUpstreams: t.IntentionUpstreams, - InternalServiceDump: t.InternalServiceDump, - LeafCertificate: t.LeafCertificate, - PeeredUpstreams: t.PeeredUpstreams, - PreparedQuery: t.PreparedQuery, - ResolvedServiceConfig: t.ResolvedServiceConfig, - ServiceList: t.ServiceList, - TrustBundle: t.TrustBundle, - TrustBundleList: t.TrustBundleList, + CARoots: t.CARoots, + CompiledDiscoveryChain: t.CompiledDiscoveryChain, + ConfigEntry: t.ConfigEntry, + ConfigEntryList: t.ConfigEntryList, + Datacenters: t.Datacenters, + GatewayServices: t.GatewayServices, + ServiceGateways: t.ServiceGateways, + Health: t.Health, + HTTPChecks: t.HTTPChecks, + Intentions: t.Intentions, + IntentionUpstreams: t.IntentionUpstreams, + IntentionUpstreamsDestination: t.IntentionUpstreamsDestination, + InternalServiceDump: t.InternalServiceDump, + LeafCertificate: t.LeafCertificate, + PeeredUpstreams: t.PeeredUpstreams, + PreparedQuery: t.PreparedQuery, + ResolvedServiceConfig: t.ResolvedServiceConfig, + ServiceList: t.ServiceList, + TrustBundle: t.TrustBundle, + TrustBundleList: t.TrustBundleList, } t.fillEnterpriseDataSources(&ds) return ds diff --git a/agent/proxycfg/testing_terminating_gateway.go b/agent/proxycfg/testing_terminating_gateway.go index 00771433b..64a624e70 100644 --- a/agent/proxycfg/testing_terminating_gateway.go +++ b/agent/proxycfg/testing_terminating_gateway.go @@ -328,8 +328,10 @@ func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, populateDesti roots, _ := TestCerts(t) var ( - externalIPTCP = structs.NewServiceName("external-IP-TCP", nil) - externalHostnameTCP = structs.NewServiceName("external-hostname-TCP", nil) + externalIPTCP = structs.NewServiceName("external-IP-TCP", nil) + externalHostnameTCP = structs.NewServiceName("external-hostname-TCP", nil) + externalIPHTTP = structs.NewServiceName("external-IP-HTTP", nil) + externalHostnameHTTP = structs.NewServiceName("external-hostname-HTTP", nil) ) baseEvents := []UpdateEvent{ @@ -357,6 +359,14 @@ func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, populateDesti Service: externalHostnameTCP, ServiceKind: structs.GatewayServiceKindDestination, }, + &structs.GatewayService{ + Service: externalIPHTTP, + ServiceKind: structs.GatewayServiceKindDestination, + }, + &structs.GatewayService{ + Service: externalHostnameHTTP, + ServiceKind: structs.GatewayServiceKindDestination, + }, ) baseEvents = testSpliceEvents(baseEvents, []UpdateEvent{ @@ -375,6 +385,14 @@ func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, populateDesti CorrelationID: serviceIntentionsIDPrefix + externalHostnameTCP.String(), Result: structs.Intentions{}, }, + { + CorrelationID: serviceIntentionsIDPrefix + externalIPHTTP.String(), + Result: structs.Intentions{}, + }, + { + CorrelationID: serviceIntentionsIDPrefix + externalHostnameHTTP.String(), + Result: structs.Intentions{}, + }, // ======== { CorrelationID: serviceLeafIDPrefix + externalIPTCP.String(), @@ -390,6 +408,20 @@ func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, 
populateDesti PrivateKeyPEM: "placeholder.key", }, }, + { + CorrelationID: serviceLeafIDPrefix + externalIPHTTP.String(), + Result: &structs.IssuedCert{ + CertPEM: "placeholder.crt", + PrivateKeyPEM: "placeholder.key", + }, + }, + { + CorrelationID: serviceLeafIDPrefix + externalHostnameHTTP.String(), + Result: &structs.IssuedCert{ + CertPEM: "placeholder.crt", + PrivateKeyPEM: "placeholder.key", + }, + }, // ======== { CorrelationID: serviceConfigIDPrefix + externalIPTCP.String(), @@ -408,11 +440,33 @@ func TestConfigSnapshotTerminatingGatewayDestinations(t testing.T, populateDesti Mode: structs.ProxyModeTransparent, ProxyConfig: map[string]interface{}{"protocol": "tcp"}, Destination: structs.DestinationConfig{ - Address: "*.hashicorp.com", + Address: "api.hashicorp.com", Port: 8089, }, }, }, + { + CorrelationID: serviceConfigIDPrefix + externalIPHTTP.String(), + Result: &structs.ServiceConfigResponse{ + Mode: structs.ProxyModeTransparent, + ProxyConfig: map[string]interface{}{"protocol": "http"}, + Destination: structs.DestinationConfig{ + Address: "192.168.0.2", + Port: 80, + }, + }, + }, + { + CorrelationID: serviceConfigIDPrefix + externalHostnameHTTP.String(), + Result: &structs.ServiceConfigResponse{ + Mode: structs.ProxyModeTransparent, + ProxyConfig: map[string]interface{}{"protocol": "http"}, + Destination: structs.DestinationConfig{ + Address: "httpbin.org", + Port: 80, + }, + }, + }, }) } diff --git a/agent/proxycfg/testing_tproxy.go b/agent/proxycfg/testing_tproxy.go index b93e6c970..ab55f3313 100644 --- a/agent/proxycfg/testing_tproxy.go +++ b/agent/proxycfg/testing_tproxy.go @@ -1,6 +1,7 @@ package proxycfg import ( + "github.com/hashicorp/consul/api" "time" "github.com/mitchellh/go-testing-interface" @@ -522,3 +523,117 @@ func TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly }, }) } + +func TestConfigSnapshotTransparentProxyDestination(t testing.T) *ConfigSnapshot { + // DiscoveryChain without an UpstreamConfig should yield a + // filter chain when in transparent proxy mode + var ( + google = structs.NewServiceName("google", nil) + googleUID = NewUpstreamIDFromServiceName(google) + googleCE = structs.ServiceConfigEntry{Name: "google", Destination: &structs.DestinationConfig{Address: "www.google.com", Port: 443}} + + kafka = structs.NewServiceName("kafka", nil) + kafkaUID = NewUpstreamIDFromServiceName(kafka) + kafkaCE = structs.ServiceConfigEntry{Name: "kafka", Destination: &structs.DestinationConfig{Address: "192.168.2.1", Port: 9093}} + ) + + return TestConfigSnapshot(t, func(ns *structs.NodeService) { + ns.Proxy.Mode = structs.ProxyModeTransparent + }, []UpdateEvent{ + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{ + Entry: &structs.MeshConfigEntry{ + TransparentProxy: structs.TransparentProxyMeshConfig{ + MeshDestinationsOnly: true, + }, + }, + }, + }, + { + CorrelationID: intentionUpstreamsDestinationID, + Result: &structs.IndexedServiceList{ + Services: structs.ServiceList{ + google, + kafka, + }, + }, + }, + { + CorrelationID: DestinationConfigEntryID + googleUID.String(), + Result: &structs.ConfigEntryResponse{ + Entry: &googleCE, + }, + }, + { + CorrelationID: DestinationConfigEntryID + kafkaUID.String(), + Result: &structs.ConfigEntryResponse{ + Entry: &kafkaCE, + }, + }, + { + CorrelationID: DestinationGatewayID + googleUID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + Address: "172.168.0.1", + Datacenter: "dc1", + 
}, + Service: &structs.NodeService{ + ID: "tgtw1", + Address: "172.168.0.1", + Port: 8443, + Kind: structs.ServiceKindTerminatingGateway, + TaggedAddresses: map[string]structs.ServiceAddress{ + structs.TaggedAddressLANIPv4: {Address: "172.168.0.1", Port: 8443}, + structs.TaggedAddressVirtualIP: {Address: "240.0.0.1"}, + }, + }, + Checks: []*structs.HealthCheck{ + { + Node: "node1", + ServiceName: "tgtw", + Name: "force", + Status: api.HealthPassing, + }, + }, + }, + }, + }, + }, + { + CorrelationID: DestinationGatewayID + kafkaUID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + Address: "172.168.0.1", + Datacenter: "dc1", + }, + Service: &structs.NodeService{ + ID: "tgtw1", + Address: "172.168.0.1", + Port: 8443, + Kind: structs.ServiceKindTerminatingGateway, + TaggedAddresses: map[string]structs.ServiceAddress{ + structs.TaggedAddressLANIPv4: {Address: "172.168.0.1", Port: 8443}, + structs.TaggedAddressVirtualIP: {Address: "240.0.0.1"}, + }, + }, + Checks: []*structs.HealthCheck{ + { + Node: "node1", + ServiceName: "tgtw", + Name: "force", + Status: api.HealthPassing, + }, + }, + }, + }, + }, + }, + }) +} diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index b4f4eea39..562e7e692 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -9,8 +9,6 @@ import ( envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - envoy_cluster_dynamic_forward_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/dynamic_forward_proxy/v3" - envoy_common_dynamic_forward_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/dynamic_forward_proxy/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" envoy_upstreams_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3" envoy_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" @@ -29,12 +27,6 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -const ( - dynamicForwardProxyClusterName = "dynamic_forward_proxy_cluster" - dynamicForwardProxyClusterTypeName = "envoy.clusters.dynamic_forward_proxy" - dynamicForwardProxyClusterDNSCacheName = "dynamic_forward_proxy_cache_config" -) - const ( meshGatewayExportedClusterNamePrefix = "exported~" ) @@ -247,28 +239,7 @@ func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, c.ConnectTimeout = durationpb.New(discoTarget.ConnectTimeout) } - spiffeID := connect.SpiffeIDService{ - Host: cfgSnap.Roots.TrustDomain, - Partition: uid.PartitionOrDefault(), - Namespace: uid.NamespaceOrDefault(), - Datacenter: cfgSnap.Datacenter, - Service: uid.Name, - } - - commonTLSContext := makeCommonTLSContext( - cfgSnap.Leaf(), - cfgSnap.RootPEMs(), - makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()), - ) - err := injectSANMatcher(commonTLSContext, spiffeID.URI().String()) - if err != nil { - return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err) - } - tlsContext := envoy_tls_v3.UpstreamTlsContext{ - CommonTlsContext: commonTLSContext, - Sni: sni, - } - transportSocket, err := makeUpstreamTLSTransportSocket(&tlsContext) + transportSocket, err := makeMTLSTransportSocket(cfgSnap, uid, sni) if err != nil { return nil, err } @@ -277,9 +248,84 
@@ func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, } } + err := cfgSnap.ConnectProxy.DestinationsUpstream.ForEachKeyE(func(uid proxycfg.UpstreamID) error { + name := clusterNameForDestination(cfgSnap, uid.Name, uid.NamespaceOrDefault(), uid.PartitionOrDefault()) + + c := envoy_cluster_v3.Cluster{ + Name: name, + AltStatName: name, + ConnectTimeout: durationpb.New(5 * time.Second), + CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{ + HealthyPanicThreshold: &envoy_type_v3.Percent{ + Value: 0, // disable panic threshold + }, + }, + ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS}, + EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{ + EdsConfig: &envoy_core_v3.ConfigSource{ + ResourceApiVersion: envoy_core_v3.ApiVersion_V3, + ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{ + Ads: &envoy_core_v3.AggregatedConfigSource{}, + }, + }, + }, + // Endpoints are managed separately by EDS + // Having an empty config enables outlier detection with default config. + OutlierDetection: &envoy_cluster_v3.OutlierDetection{}, + } + + // Use the cluster name as the SNI to match on in the terminating gateway + transportSocket, err := makeMTLSTransportSocket(cfgSnap, uid, name) + if err != nil { + return err + } + c.TransportSocket = transportSocket + clusters = append(clusters, &c) + return nil + }) + if err != nil { + return nil, err + } + return clusters, nil } +func makeMTLSTransportSocket(cfgSnap *proxycfg.ConfigSnapshot, uid proxycfg.UpstreamID, sni string) (*envoy_core_v3.TransportSocket, error) { + spiffeID := connect.SpiffeIDService{ + Host: cfgSnap.Roots.TrustDomain, + Partition: uid.PartitionOrDefault(), + Namespace: uid.NamespaceOrDefault(), + Datacenter: cfgSnap.Datacenter, + Service: uid.Name, + } + + commonTLSContext := makeCommonTLSContext( + cfgSnap.Leaf(), + cfgSnap.RootPEMs(), + makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()), + ) + err := injectSANMatcher(commonTLSContext, spiffeID.URI().String()) + if err != nil { + return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err) + } + tlsContext := envoy_tls_v3.UpstreamTlsContext{ + CommonTlsContext: commonTLSContext, + Sni: sni, + } + transportSocket, err := makeUpstreamTLSTransportSocket(&tlsContext) + if err != nil { + return nil, err + } + return transportSocket, nil +} + +func clusterNameForDestination(cfgSnap *proxycfg.ConfigSnapshot, name string, namespace string, partition string) string { + sni := connect.ServiceSNI(name, "", namespace, partition, cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) + + // Prefixed with destination to distinguish from non-passthrough clusters for the same upstream. + return "destination~" + sni +} + // clustersFromSnapshotMeshGateway returns the xDS API representation of the "clusters" // for a mesh gateway. This will include 1 cluster per remote datacenter as well as // 1 cluster for each service subset. 
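A rough illustration of the destination cluster naming above (values are
hypothetical; the exact SNI depends on namespace, partition, datacenter and
trust domain):

    // clusterNameForDestination simply prefixes the standard service SNI.
    name := clusterNameForDestination(cfgSnap, "kafka", "default", "default")
    // name == "destination~" + connect.ServiceSNI("kafka", "", "default", "default",
    //     cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)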
@@ -475,7 +521,6 @@ func (s *ResourceGenerator) makeGatewayServiceClusters( } func (s *ResourceGenerator) makeDestinationClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) { - var createDynamicForwardProxy bool serviceConfigs := cfgSnap.TerminatingGateway.ServiceConfigs clusters := make([]proto.Message, 0, len(cfgSnap.TerminatingGateway.DestinationServices)) @@ -484,31 +529,17 @@ func (s *ResourceGenerator) makeDestinationClusters(cfgSnap *proxycfg.ConfigSnap svcConfig, _ := serviceConfigs[svcName] dest := svcConfig.Destination - // If IP, create a cluster with the fake name. - if dest.HasIP() { - opts := clusterOpts{ - name: connect.ServiceSNI(svcName.Name, "", svcName.NamespaceOrDefault(), svcName.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain), - addressEndpoint: dest, - } - cluster := s.makeTerminatingIPCluster(cfgSnap, opts) - clusters = append(clusters, cluster) - continue - } - - // TODO (dans): clusters will need to be customized later when we figure out how to manage a TLS segment from the terminating gateway to the Destination. - createDynamicForwardProxy = true - } - - if createDynamicForwardProxy { opts := clusterOpts{ - name: dynamicForwardProxyClusterName, + name: clusterNameForDestination(cfgSnap, svcName.Name, svcName.NamespaceOrDefault(), svcName.PartitionOrDefault()), + addressEndpoint: dest, } - cluster := s.makeDynamicForwardProxyCluster(cfgSnap, opts) - // TODO (dans): might be relevant later for TLS addons like CA validation - // if err := s.injectGatewayServiceAddons(cfgSnap, cluster, svc, loadBalancer); err != nil { - // return nil, err - // } + var cluster *envoy_cluster_v3.Cluster + if dest.HasIP() { + cluster = s.makeTerminatingIPCluster(cfgSnap, opts) + } else { + cluster = s.makeTerminatingHostnameCluster(cfgSnap, opts) + } clusters = append(clusters, cluster) } return clusters, nil @@ -1360,7 +1391,7 @@ func configureClusterWithHostnames( } } -// makeGatewayCluster creates an Envoy cluster for a mesh or terminating gateway +// makeTerminatingIPCluster creates an Envoy cluster for a terminating gateway with an ip destination func (s *ResourceGenerator) makeTerminatingIPCluster(snap *proxycfg.ConfigSnapshot, opts clusterOpts) *envoy_cluster_v3.Cluster { cfg, err := ParseGatewayConfig(snap.Proxy.Config) if err != nil { @@ -1377,12 +1408,10 @@ func (s *ResourceGenerator) makeTerminatingIPCluster(snap *proxycfg.ConfigSnapsh ConnectTimeout: durationpb.New(opts.connectTimeout), // Having an empty config enables outlier detection with default config. 
- OutlierDetection: &envoy_cluster_v3.OutlierDetection{}, + OutlierDetection: &envoy_cluster_v3.OutlierDetection{}, + ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC}, } - discoveryType := envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC} - cluster.ClusterDiscoveryType = &discoveryType - endpoints := []*envoy_endpoint_v3.LbEndpoint{ makeEndpoint(opts.addressEndpoint.Address, opts.addressEndpoint.Port), } @@ -1398,47 +1427,49 @@ func (s *ResourceGenerator) makeTerminatingIPCluster(snap *proxycfg.ConfigSnapsh return cluster } -// makeDynamicForwardProxyCluster creates an Envoy cluster for that routes based on the SNI header received at the listener -func (s *ResourceGenerator) makeDynamicForwardProxyCluster(snap *proxycfg.ConfigSnapshot, opts clusterOpts) *envoy_cluster_v3.Cluster { +// makeTerminatingHostnameCluster creates an Envoy cluster for a terminating gateway with a hostname destination +func (s *ResourceGenerator) makeTerminatingHostnameCluster(snap *proxycfg.ConfigSnapshot, opts clusterOpts) *envoy_cluster_v3.Cluster { cfg, err := ParseGatewayConfig(snap.Proxy.Config) if err != nil { // Don't hard fail on a config typo, just warn. The parse func returns // default config if there is an error so it's safe to continue. s.Logger.Warn("failed to parse gateway config", "error", err) } - if opts.connectTimeout <= 0 { - opts.connectTimeout = time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond - } + opts.connectTimeout = time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond cluster := &envoy_cluster_v3.Cluster{ Name: opts.name, ConnectTimeout: durationpb.New(opts.connectTimeout), + + // Having an empty config enables outlier detection with default config. + OutlierDetection: &envoy_cluster_v3.OutlierDetection{}, + ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS}, + DnsLookupFamily: envoy_cluster_v3.Cluster_AUTO, } - dynamicForwardProxyCluster, err := anypb.New(&envoy_cluster_dynamic_forward_proxy_v3.ClusterConfig{ - DnsCacheConfig: getCommonDNSCacheConfiguration(), - }) - if err != nil { - // we should never get here since this message is static - s.Logger.Error("failed serialize dynamic forward proxy cluster config", "error", err) - } + rate := 10 * time.Second + cluster.DnsRefreshRate = durationpb.New(rate) - cluster.LbPolicy = envoy_cluster_v3.Cluster_CLUSTER_PROVIDED - cluster.ClusterDiscoveryType = &envoy_cluster_v3.Cluster_ClusterType{ - ClusterType: &envoy_cluster_v3.Cluster_CustomClusterType{ - Name: dynamicForwardProxyClusterTypeName, - TypedConfig: dynamicForwardProxyCluster, + address := makeAddress(opts.addressEndpoint.Address, opts.addressEndpoint.Port) + + endpoints := []*envoy_endpoint_v3.LbEndpoint{ + { + HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ + Endpoint: &envoy_endpoint_v3.Endpoint{ + Address: address, + }, + }, }, } - return cluster -} - -func getCommonDNSCacheConfiguration() *envoy_common_dynamic_forward_proxy_v3.DnsCacheConfig { - return &envoy_common_dynamic_forward_proxy_v3.DnsCacheConfig{ - Name: dynamicForwardProxyClusterDNSCacheName, - DnsLookupFamily: envoy_cluster_v3.Cluster_AUTO, + cluster.LoadAssignment = &envoy_endpoint_v3.ClusterLoadAssignment{ + ClusterName: cluster.Name, + Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{{ + LbEndpoints: endpoints, + }}, } + + return cluster } func makeThresholdsIfNeeded(limits *structs.UpstreamLimits) []*envoy_cluster_v3.CircuitBreakers_Thresholds { diff --git a/agent/xds/clusters_test.go 
b/agent/xds/clusters_test.go index 49d333750..96e7615c7 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -621,12 +621,6 @@ func TestClustersFromSnapshot(t *testing.T) { name: "transparent-proxy-dial-instances-directly", create: proxycfg.TestConfigSnapshotTransparentProxyDialDirectly, }, - { - name: "transparent-proxy-terminating-gateway-destinations-only", - create: func(t testinf.T) *proxycfg.ConfigSnapshot { - return proxycfg.TestConfigSnapshotTerminatingGatewayDestinations(t, true, nil) - }, - }, } latestEnvoyVersion := proxysupport.EnvoyVersions[0] diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index 2538914dd..edfe1c616 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -3,7 +3,6 @@ package xds import ( "errors" "fmt" - envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" @@ -149,6 +148,24 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. } } + // Loop over potential destinations in the mesh, then grab the gateway nodes associated with each + cfgSnap.ConnectProxy.DestinationsUpstream.ForEachKey(func(uid proxycfg.UpstreamID) bool { + name := clusterNameForDestination(cfgSnap, uid.Name, uid.NamespaceOrDefault(), uid.PartitionOrDefault()) + + endpoints, ok := cfgSnap.ConnectProxy.DestinationGateways.Get(uid) + if ok { + la := makeLoadAssignment( + name, + []loadAssignmentEndpointGroup{ + {Endpoints: endpoints}, + }, + proxycfg.GatewayKey{ /*empty so it never matches*/ }, + ) + resources = append(resources, la) + } + return true + }) + return resources, nil } diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 2f3650aa7..0ef16899f 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -22,7 +22,6 @@ import ( envoy_connection_limit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/connection_limit/v3" envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" envoy_sni_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/sni_cluster/v3" - envoy_sni_dynamic_forward_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3" envoy_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" @@ -97,6 +96,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. outboundListener = makePortListener(OutboundListenerName, "127.0.0.1", port, envoy_core_v3.TrafficDirection_OUTBOUND) outboundListener.FilterChains = make([]*envoy_listener_v3.FilterChain, 0) + outboundListener.ListenerFilters = []*envoy_listener_v3.ListenerFilter{ // The original_dst filter is a listener filter that recovers the original destination // address before the iptables redirection. This filter is needed for transparent @@ -226,7 +226,44 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. 
outboundListener.FilterChains = append(outboundListener.FilterChains, filterChain) } } + hasDestination := false + err = cfgSnap.ConnectProxy.DestinationsUpstream.ForEachKeyE(func(uid proxycfg.UpstreamID) error { + destination, ok := cfgSnap.ConnectProxy.DestinationsUpstream.Get(uid) + + if ok && destination != nil { + upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid] + cfg := s.getAndModifyUpstreamConfigForListener(uid, upstreamCfg, nil) + + clusterName := clusterNameForDestination(cfgSnap, uid.Name, uid.NamespaceOrDefault(), uid.PartitionOrDefault()) + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + routeName: uid.EnvoyID(), + clusterName: clusterName, + filterName: clusterName, + protocol: cfg.Protocol, + useRDS: cfg.Protocol != "tcp", + }) + if err != nil { + return err + } + filterChain.FilterChainMatch = makeFilterChainMatchFromAddressWithPort(destination.Destination.Address, destination.Destination.Port) + outboundListener.FilterChains = append(outboundListener.FilterChains, filterChain) + + hasDestination = len(filterChain.FilterChainMatch.ServerNames) != 0 || hasDestination + } + return nil + }) + if err != nil { + return nil, err + } + + if hasDestination { + tlsInspector, err := makeTLSInspectorListenerFilter() + if err != nil { + return nil, err + } + outboundListener.ListenerFilters = append(outboundListener.ListenerFilters, tlsInspector) + } // Looping over explicit upstreams is only needed for cross-peer because // they do not have discovery chains. for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() { @@ -325,8 +362,27 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. // Filter chains are stable sorted to avoid draining if the list is provided out of order sort.SliceStable(outboundListener.FilterChains, func(i, j int) bool { - return outboundListener.FilterChains[i].FilterChainMatch.PrefixRanges[0].AddressPrefix < - outboundListener.FilterChains[j].FilterChainMatch.PrefixRanges[0].AddressPrefix + si := "" + sj := "" + if len(outboundListener.FilterChains[i].FilterChainMatch.PrefixRanges) > 0 { + si += outboundListener.FilterChains[i].FilterChainMatch.PrefixRanges[0].AddressPrefix + + "/" + outboundListener.FilterChains[i].FilterChainMatch.PrefixRanges[0].PrefixLen.String() + + ":" + outboundListener.FilterChains[i].FilterChainMatch.DestinationPort.String() + } + if len(outboundListener.FilterChains[i].FilterChainMatch.ServerNames) > 0 { + si += outboundListener.FilterChains[i].FilterChainMatch.ServerNames[0] + } + + if len(outboundListener.FilterChains[j].FilterChainMatch.PrefixRanges) > 0 { + sj += outboundListener.FilterChains[j].FilterChainMatch.PrefixRanges[0].AddressPrefix + + "/" + outboundListener.FilterChains[j].FilterChainMatch.PrefixRanges[0].PrefixLen.String() + + ":" + outboundListener.FilterChains[j].FilterChainMatch.DestinationPort.String() + } + if len(outboundListener.FilterChains[j].FilterChainMatch.ServerNames) > 0 { + sj += outboundListener.FilterChains[j].FilterChainMatch.ServerNames[0] + } + + return si < sj }) // Add a catch-all filter chain that acts as a TCP proxy to destinations outside the mesh @@ -341,11 +397,11 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. if err != nil { return nil, err } - outboundListener.FilterChains = append(outboundListener.FilterChains, filterChain) + outboundListener.DefaultFilterChain = filterChain } // Only add the outbound listener if configured. 
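+	// The catch-all passthrough chain is now installed as the listener's
+	// default filter chain rather than appended to FilterChains, so the
+	// outbound listener must also be emitted when only that chain exists.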
- if len(outboundListener.FilterChains) > 0 { + if len(outboundListener.FilterChains) > 0 || outboundListener.DefaultFilterChain != nil { resources = append(resources, outboundListener) } } @@ -456,6 +512,32 @@ func makeFilterChainMatchFromAddrs(addrs map[string]struct{}) *envoy_listener_v3 } } +func makeFilterChainMatchFromAddressWithPort(address string, port int) *envoy_listener_v3.FilterChainMatch { + ranges := make([]*envoy_core_v3.CidrRange, 0) + + ip := net.ParseIP(address) + if ip == nil { + return &envoy_listener_v3.FilterChainMatch{ + ServerNames: []string{address}, + DestinationPort: &wrappers.UInt32Value{Value: uint32(port)}, + } + } + + pfxLen := uint32(32) + if ip.To4() == nil { + pfxLen = 128 + } + ranges = append(ranges, &envoy_core_v3.CidrRange{ + AddressPrefix: address, + PrefixLen: &wrappers.UInt32Value{Value: pfxLen}, + }) + + return &envoy_listener_v3.FilterChainMatch{ + PrefixRanges: ranges, + DestinationPort: &wrappers.UInt32Value{Value: uint32(port)}, + } +} + func parseCheckPath(check structs.CheckType) (structs.ExposePath, error) { var path structs.ExposePath @@ -1223,7 +1305,7 @@ func (s *ResourceGenerator) makeTerminatingGatewayListener( } for _, svc := range cfgSnap.TerminatingGateway.ValidDestinations() { - clusterName := connect.ServiceSNI(svc.Name, "", svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) + clusterName := clusterNameForDestination(cfgSnap, svc.Name, svc.NamespaceOrDefault(), svc.PartitionOrDefault()) intentions := cfgSnap.TerminatingGateway.Intentions[svc] svcConfig := cfgSnap.TerminatingGateway.ServiceConfigs[svc] @@ -1240,11 +1322,7 @@ func (s *ResourceGenerator) makeTerminatingGatewayListener( } var dest *structs.DestinationConfig - if cfgSnap.TerminatingGateway.DestinationServices[svc].ServiceKind == structs.GatewayServiceKindDestination { - dest = &svcConfig.Destination - } else { - return nil, fmt.Errorf("invalid gateway service for destination %s", svc.Name) - } + dest = &svcConfig.Destination clusterChain, err := s.makeFilterChainTerminatingGateway(cfgSnap, clusterName, svc, intentions, cfg.Protocol, dest) if err != nil { return nil, fmt.Errorf("failed to make filter chain for cluster %q: %v", clusterName, err) @@ -1299,19 +1377,10 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. return nil, err } - var filterChain *envoy_listener_v3.FilterChain - if dest != nil { - filterChain = &envoy_listener_v3.FilterChain{ - FilterChainMatch: makeDestinationFilterChainMatch(cluster, dest), - Filters: make([]*envoy_listener_v3.Filter, 0, 3), - TransportSocket: transportSocket, - } - } else { - filterChain = &envoy_listener_v3.FilterChain{ - FilterChainMatch: makeSNIFilterChainMatch(cluster), - Filters: make([]*envoy_listener_v3.Filter, 0, 3), - TransportSocket: transportSocket, - } + filterChain := &envoy_listener_v3.FilterChain{ + FilterChainMatch: makeSNIFilterChainMatch(cluster), + Filters: make([]*envoy_listener_v3.Filter, 0, 3), + TransportSocket: transportSocket, } // This controls if we do L4 or L7 intention checks. @@ -1335,28 +1404,16 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. filterChain.Filters = append(filterChain.Filters, authFilter) } - // For Destinations of Hostname types, we use the dynamic forward proxy filter since this could be - // a wildcard match. 
We also send to the dynamic forward cluster - if dest != nil && dest.HasHostname() { - dynamicFilter, err := makeSNIDynamicForwardProxyFilter(dest.Port) - if err != nil { - return nil, err - } - filterChain.Filters = append(filterChain.Filters, dynamicFilter) - cluster = dynamicForwardProxyClusterName - } - // Lastly we setup the actual proxying component. For L4 this is a straight // tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an // HTTP filter to do intention checks here instead. opts := listenerFilterOpts{ - protocol: protocol, - filterName: fmt.Sprintf("%s.%s.%s.%s", service.Name, service.NamespaceOrDefault(), service.PartitionOrDefault(), cfgSnap.Datacenter), - routeName: cluster, // Set cluster name for route config since each will have its own - cluster: cluster, - statPrefix: "upstream.", - routePath: "", - useDynamicForwardProxy: dest != nil && dest.HasHostname(), + protocol: protocol, + filterName: fmt.Sprintf("%s.%s.%s.%s", service.Name, service.NamespaceOrDefault(), service.PartitionOrDefault(), cfgSnap.Datacenter), + routeName: cluster, // Set cluster name for route config since each will have its own + cluster: cluster, + statPrefix: "upstream.", + routePath: "", } if useHTTPFilter { @@ -1387,6 +1444,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. filter, err := makeListenerFilter(opts) if err != nil { + s.Logger.Error("failed to make listener", "cluster", cluster, "error", err) return nil, err } filterChain.Filters = append(filterChain.Filters, filter) @@ -1394,23 +1452,6 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. return filterChain, nil } -func makeDestinationFilterChainMatch(cluster string, dest *structs.DestinationConfig) *envoy_listener_v3.FilterChainMatch { - // For hostname and wildcard destinations, we match on the address. 
- - // For IP Destinations, use the alias SNI name to match - ip := net.ParseIP(dest.Address) - if ip != nil { - return &envoy_listener_v3.FilterChainMatch{ - ServerNames: []string{cluster}, - } - } - - // For hostname and wildcard destinations, we match on the address in the Destination - return &envoy_listener_v3.FilterChainMatch{ - ServerNames: []string{dest.Address}, - } -} - func (s *ResourceGenerator) makeMeshGatewayListener(name, addr string, port int, cfgSnap *proxycfg.ConfigSnapshot) (*envoy_listener_v3.Listener, error) { tlsInspector, err := makeTLSInspectorListenerFilter() if err != nil { @@ -1705,12 +1746,15 @@ func (s *ResourceGenerator) getAndModifyUpstreamConfigForListener( cfg.EnvoyListenerJSON = "" } } - protocol := cfg.Protocol - if protocol == "" { - protocol = chain.Protocol - } - if protocol == "" { + if chain != nil { + if protocol == "" { + protocol = chain.Protocol + } + if protocol == "" { + protocol = "tcp" + } + } else { protocol = "tcp" } @@ -1761,19 +1805,18 @@ func (s *ResourceGenerator) getAndModifyUpstreamConfigForPeeredListener( } type listenerFilterOpts struct { - useRDS bool - protocol string - filterName string - routeName string - cluster string - statPrefix string - routePath string - requestTimeoutMs *int - ingressGateway bool - httpAuthzFilter *envoy_http_v3.HttpFilter - forwardClientDetails bool - forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails - useDynamicForwardProxy bool + useRDS bool + protocol string + filterName string + routeName string + cluster string + statPrefix string + routePath string + requestTimeoutMs *int + ingressGateway bool + httpAuthzFilter *envoy_http_v3.HttpFilter + forwardClientDetails bool + forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails } func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) { @@ -1806,13 +1849,6 @@ func makeSNIClusterFilter() (*envoy_listener_v3.Filter, error) { return makeFilter("envoy.filters.network.sni_cluster", &envoy_sni_cluster_v3.SniCluster{}) } -func makeSNIDynamicForwardProxyFilter(upstreamPort int) (*envoy_listener_v3.Filter, error) { - return makeFilter("envoy.filters.network.sni_dynamic_forward_proxy", &envoy_sni_dynamic_forward_proxy_v3.FilterConfig{ - DnsCacheConfig: getCommonDNSCacheConfiguration(), - PortSpecifier: &envoy_sni_dynamic_forward_proxy_v3.FilterConfig_PortValue{PortValue: uint32(upstreamPort)}, - }) -} - func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener_v3.Filter, error) { cfg := &envoy_tcp_proxy_v3.TcpProxy{ StatPrefix: makeStatPrefix(statPrefix, filterName), diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index ca6750fb5..3055b4436 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -776,12 +776,6 @@ func TestListenersFromSnapshot(t *testing.T) { name: "transparent-proxy-terminating-gateway", create: proxycfg.TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly, }, - { - name: "transparent-proxy-terminating-gateway-destinations-only", - create: func(t testinf.T) *proxycfg.ConfigSnapshot { - return proxycfg.TestConfigSnapshotTerminatingGatewayDestinations(t, true, nil) - }, - }, } latestEnvoyVersion := proxysupport.EnvoyVersions[0] diff --git a/agent/xds/rbac.go b/agent/xds/rbac.go index 11b37fc32..cd1424ca9 100644 --- a/agent/xds/rbac.go +++ b/agent/xds/rbac.go @@ -24,10 +24,7 @@ func makeRBACNetworkFilter( localInfo rbacLocalInfo, peerTrustBundles 
[]*pbpeering.PeeringTrustBundle, ) (*envoy_listener_v3.Filter, error) { - rules, err := makeRBACRules(intentions, intentionDefaultAllow, localInfo, false, peerTrustBundles) - if err != nil { - return nil, err - } + rules := makeRBACRules(intentions, intentionDefaultAllow, localInfo, false, peerTrustBundles) cfg := &envoy_network_rbac_v3.RBAC{ StatPrefix: "connect_authz", @@ -42,10 +39,7 @@ func makeRBACHTTPFilter( localInfo rbacLocalInfo, peerTrustBundles []*pbpeering.PeeringTrustBundle, ) (*envoy_http_v3.HttpFilter, error) { - rules, err := makeRBACRules(intentions, intentionDefaultAllow, localInfo, true, peerTrustBundles) - if err != nil { - return nil, err - } + rules := makeRBACRules(intentions, intentionDefaultAllow, localInfo, true, peerTrustBundles) cfg := &envoy_http_rbac_v3.RBAC{ Rules: rules, @@ -485,7 +479,7 @@ func makeRBACRules( localInfo rbacLocalInfo, isHTTP bool, peerTrustBundles []*pbpeering.PeeringTrustBundle, -) (*envoy_rbac_v3.RBAC, error) { +) *envoy_rbac_v3.RBAC { // TODO(banks,rb): Implement revocation list checking? // TODO(peering): mkeeler asked that these maps come from proxycfg instead of @@ -565,7 +559,7 @@ func makeRBACRules( if len(rbac.Policies) == 0 { rbac.Policies = nil } - return rbac, nil + return rbac } func optimizePrincipals(orig []*envoy_rbac_v3.Principal) []*envoy_rbac_v3.Principal { diff --git a/agent/xds/resources_test.go b/agent/xds/resources_test.go index 45070a8c1..983f1bb44 100644 --- a/agent/xds/resources_test.go +++ b/agent/xds/resources_test.go @@ -149,6 +149,7 @@ func TestAllResourcesFromSnapshot(t *testing.T) { create: proxycfg.TestConfigSnapshotPeering, }, } + tests = append(tests, getConnectProxyTransparentProxyGoldenTestCases()...) tests = append(tests, getMeshGatewayPeeringGoldenTestCases()...) tests = append(tests, getEnterpriseGoldenTestCases()...) 
@@ -166,6 +167,21 @@ func TestAllResourcesFromSnapshot(t *testing.T) { } } +func getConnectProxyTransparentProxyGoldenTestCases() []goldenTestCase { + return []goldenTestCase{ + { + name: "transparent-proxy-destination", + create: proxycfg.TestConfigSnapshotTransparentProxyDestination, + }, + { + name: "transparent-proxy-terminating-gateway-destinations-only", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotTerminatingGatewayDestinations(t, true, nil) + }, + }, + } +} + func getMeshGatewayPeeringGoldenTestCases() []goldenTestCase { return []goldenTestCase{ { diff --git a/agent/xds/routes.go b/agent/xds/routes.go index 6faa1fa67..dd0beacb2 100644 --- a/agent/xds/routes.go +++ b/agent/xds/routes.go @@ -86,47 +86,80 @@ func (s *ResourceGenerator) routesForTerminatingGateway(cfgSnap *proxycfg.Config var resources []proto.Message for _, svc := range cfgSnap.TerminatingGateway.ValidServices() { clusterName := connect.ServiceSNI(svc.Name, "", svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) - resolver, hasResolver := cfgSnap.TerminatingGateway.ServiceResolvers[svc] - - svcConfig := cfgSnap.TerminatingGateway.ServiceConfigs[svc] - - cfg, err := ParseProxyConfig(svcConfig.ProxyConfig) + routes, err := s.makeRoutes(cfgSnap, svc, clusterName, true) if err != nil { - return nil, fmt.Errorf("failed to parse upstream config: %v", err) + return nil, err } - if !structs.IsProtocolHTTPLike(cfg.Protocol) { - // Routes can only be defined for HTTP services - continue - } - - if !hasResolver { - // Use a zero value resolver with no timeout and no subsets - resolver = &structs.ServiceResolverConfigEntry{} - } - - var lb *structs.LoadBalancer - if resolver.LoadBalancer != nil { - lb = resolver.LoadBalancer - } - route, err := makeNamedDefaultRouteWithLB(clusterName, lb, true) - if err != nil { - s.Logger.Error("failed to make route", "cluster", clusterName, "error", err) - continue - } - resources = append(resources, route) - - // If there is a service-resolver for this service then also setup routes for each subset - for name := range resolver.Subsets { - clusterName = connect.ServiceSNI(svc.Name, name, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) - route, err := makeNamedDefaultRouteWithLB(clusterName, lb, true) - if err != nil { - s.Logger.Error("failed to make route", "cluster", clusterName, "error", err) - continue - } - resources = append(resources, route) + if routes != nil { + resources = append(resources, routes...) } } + for _, svc := range cfgSnap.TerminatingGateway.ValidDestinations() { + clusterName := clusterNameForDestination(cfgSnap, svc.Name, svc.NamespaceOrDefault(), svc.PartitionOrDefault()) + routes, err := s.makeRoutes(cfgSnap, svc, clusterName, false) + if err != nil { + return nil, err + } + if routes != nil { + resources = append(resources, routes...) + } + } + + return resources, nil +} + +func (s *ResourceGenerator) makeRoutes( + cfgSnap *proxycfg.ConfigSnapshot, + svc structs.ServiceName, + clusterName string, + autoHostRewrite bool) ([]proto.Message, error) { + resolver, hasResolver := cfgSnap.TerminatingGateway.ServiceResolvers[svc] + + svcConfig := cfgSnap.TerminatingGateway.ServiceConfigs[svc] + + cfg, err := ParseProxyConfig(svcConfig.ProxyConfig) + if err != nil { + // Don't hard fail on a config typo, just warn. The parse func returns + // default config if there is an error so it's safe to continue. 
+ s.Logger.Warn( + "failed to parse Proxy.Config", + "service", svc.String(), + "error", err, + ) + } + if !structs.IsProtocolHTTPLike(cfg.Protocol) { + // Routes can only be defined for HTTP services + return nil, nil + } + + if !hasResolver { + // Use a zero value resolver with no timeout and no subsets + resolver = &structs.ServiceResolverConfigEntry{} + } + + var resources []proto.Message + var lb *structs.LoadBalancer + if resolver.LoadBalancer != nil { + lb = resolver.LoadBalancer + } + route, err := makeNamedDefaultRouteWithLB(clusterName, lb, autoHostRewrite) + if err != nil { + s.Logger.Error("failed to make route", "cluster", clusterName, "error", err) + return nil, err + } + resources = append(resources, route) + + // If there is a service-resolver for this service then also setup routes for each subset + for name := range resolver.Subsets { + clusterName = connect.ServiceSNI(svc.Name, name, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) + route, err := makeNamedDefaultRouteWithLB(clusterName, lb, true) + if err != nil { + s.Logger.Error("failed to make route", "cluster", clusterName, "error", err) + return nil, err + } + resources = append(resources, route) + } return resources, nil } diff --git a/agent/xds/testdata/clusters/transparent-proxy-destination.latest.golden b/agent/xds/testdata/clusters/transparent-proxy-destination.latest.golden new file mode 100644 index 000000000..1a5311ab9 --- /dev/null +++ b/agent/xds/testdata/clusters/transparent-proxy-destination.latest.golden @@ -0,0 +1,255 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE 
KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "destination~google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "destination~google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/google" + } + ] + } + }, + "sni": "destination~google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "destination~kafka.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "destination~kafka.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/kafka" + } + ] + } + }, + "sni": "destination~kafka.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/transparent-proxy-terminating-gateway-destinations-only.latest.golden b/agent/xds/testdata/clusters/transparent-proxy-terminating-gateway-destinations-only.latest.golden index cd99d12dd..3cc818f59 100644 --- a/agent/xds/testdata/clusters/transparent-proxy-terminating-gateway-destinations-only.latest.golden +++ b/agent/xds/testdata/clusters/transparent-proxy-terminating-gateway-destinations-only.latest.golden @@ -3,26 +3,39 @@ "resources": [ { "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", - "name": "dynamic_forward_proxy_cluster", - "clusterType": { - "name": "envoy.clusters.dynamic_forward_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig", - "dnsCacheConfig": { - "name": "dynamic_forward_proxy_cache_config" - } - } - }, - "connectTimeout": "5s", - "lbPolicy": "CLUSTER_PROVIDED" - }, - { - "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", - "name": "external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "name": "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", "type": "STATIC", "connectTimeout": "5s", "loadAssignment": { - "clusterName": "external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "clusterName": "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "192.168.0.2", + "portValue": 80 + } + } + } + } + ] + } + ] + }, + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": 
"destination~external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "destination~external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", "endpoints": [ { "lbEndpoints": [ @@ -42,6 +55,64 @@ }, "outlierDetection": { + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "httpbin.org", + "portValue": 80 + } + } + } + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "outlierDetection": { + + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "destination~external-hostname-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "destination~external-hostname-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "api.hashicorp.com", + "portValue": 8089 + } + } + } + } + ] + } + ] + }, + "dnsRefreshRate": "10s", + "outlierDetection": { + } } ], diff --git a/agent/xds/testdata/endpoints/transparent-proxy-destination.latest.golden b/agent/xds/testdata/endpoints/transparent-proxy-destination.latest.golden new file mode 100644 index 000000000..d51ea93da --- /dev/null +++ b/agent/xds/testdata/endpoints/transparent-proxy-destination.latest.golden @@ -0,0 +1,119 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "destination~google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.168.0.1", + "portValue": 8443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "destination~kafka.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.168.0.1", + "portValue": 8443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": 
"geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.20.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/transparent-proxy-terminating-gateway-destinations-only.latest.golden b/agent/xds/testdata/endpoints/transparent-proxy-terminating-gateway-destinations-only.latest.golden new file mode 100644 index 000000000..8504dae2b --- /dev/null +++ b/agent/xds/testdata/endpoints/transparent-proxy-terminating-gateway-destinations-only.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/transparent-proxy-destination.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-destination.latest.golden new file mode 100644 index 000000000..c61302c5e --- /dev/null +++ b/agent/xds/testdata/listeners/transparent-proxy-destination.latest.golden @@ -0,0 +1,185 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "outbound_listener:127.0.0.1:15001", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 15001 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "destinationPort": 9093, + "prefixRanges": [ + { + "addressPrefix": "192.168.2.1", + "prefixLen": 32 + } + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.destination~kafka.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "cluster": "destination~kafka.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + }, + { + "filterChainMatch": { + "destinationPort": 443, + "serverNames": [ + "www.google.com" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.destination~google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "cluster": "destination~google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.original_dst", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } + }, + { + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden index d6f3ea51d..ddfaa45d5 100644 --- a/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden @@ -99,20 +99,20 @@ } } ] - }, - { - "filters": [ - { - "name": "envoy.filters.network.tcp_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", - "statPrefix": "upstream.original-destination", - "cluster": "original-destination" - } - } - ] } ], + "defaultFilterChain": { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.original-destination", + "cluster": "original-destination" + } + } + ] + }, "listenerFilters": [ { "name": "envoy.filters.listener.original_dst", diff --git a/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden index b6f00f2cd..7c4a0a622 100644 --- a/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden @@ -92,20 +92,20 @@ } } ] - }, - { - "filters": [ - { - "name": "envoy.filters.network.tcp_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", - "statPrefix": "upstream.original-destination", - "cluster": "original-destination" - } - } - ] } ], + "defaultFilterChain": { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.original-destination", + "cluster": "original-destination" + } + } + ] + }, "listenerFilters": [ { "name": "envoy.filters.listener.original_dst", diff --git a/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway-destinations-only.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway-destinations-only.latest.golden index 80bfab3c9..e281eb734 100644 --- 
a/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway-destinations-only.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway-destinations-only.latest.golden @@ -14,36 +14,54 @@ { "filterChainMatch": { "serverNames": [ - "*.hashicorp.com" + "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" ] }, "filters": [ { - "name": "envoy.filters.network.rbac", + "name": "envoy.filters.network.http_connection_manager", "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", - "rules": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "upstream.external-IP-HTTP.default.default.dc1", + "rds": { + "configSource": { + "ads": { + }, + "resourceApiVersion": "V3" + }, + "routeConfigName": "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" }, - "statPrefix": "connect_authz" - } - }, - { - "name": "envoy.filters.network.sni_dynamic_forward_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig", - "dnsCacheConfig": { - "name": "dynamic_forward_proxy_cache_config" + "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": { + + } }, - "portValue": 8089 - } - }, - { - "name": "envoy.filters.network.tcp_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", - "statPrefix": "upstream.external-hostname-TCP.default.default.dc1", - "cluster": "dynamic_forward_proxy_cluster" + "forwardClientCertDetails": "APPEND_FORWARD", + "setCurrentClientCertDetails": { + "subject": true, + "cert": true, + "chain": true, + "dns": true, + "uri": true + } } } ], @@ -78,7 +96,7 @@ { "filterChainMatch": { "serverNames": [ - "external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + "destination~external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" ] }, "filters": [ @@ -97,7 +115,143 @@ "typedConfig": { "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", "statPrefix": "upstream.external-IP-TCP.default.default.dc1", - "cluster": "external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + "cluster": "destination~external-IP-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "placeholder.crt\n" + }, + "privateKey": { + "inlineString": "placeholder.key\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "upstream.external-hostname-HTTP.default.default.dc1", + "rds": { + "configSource": { + "ads": { + + }, + "resourceApiVersion": "V3" + }, + "routeConfigName": "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": { + + } + }, + "forwardClientCertDetails": "APPEND_FORWARD", + "setCurrentClientCertDetails": { + "subject": true, + "cert": true, + "chain": true, + "dns": true, + "uri": true + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "placeholder.crt\n" + }, + "privateKey": { + "inlineString": "placeholder.key\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, 
+ { + "filterChainMatch": { + "serverNames": [ + "destination~external-hostname-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.external-hostname-TCP.default.default.dc1", + "cluster": "destination~external-hostname-TCP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" } } ], diff --git a/agent/xds/testdata/listeners/transparent-proxy.latest.golden b/agent/xds/testdata/listeners/transparent-proxy.latest.golden index ca8b75eb2..ee4c90857 100644 --- a/agent/xds/testdata/listeners/transparent-proxy.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy.latest.golden @@ -59,20 +59,20 @@ } } ] - }, - { - "filters": [ - { - "name": "envoy.filters.network.tcp_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", - "statPrefix": "upstream.original-destination", - "cluster": "original-destination" - } - } - ] } ], + "defaultFilterChain": { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.original-destination", + "cluster": "original-destination" + } + } + ] + }, "listenerFilters": [ { "name": "envoy.filters.listener.original_dst", diff --git a/agent/xds/testdata/routes/transparent-proxy-destination.latest.golden b/agent/xds/testdata/routes/transparent-proxy-destination.latest.golden new file mode 100644 index 000000000..9c050cbe6 --- /dev/null +++ b/agent/xds/testdata/routes/transparent-proxy-destination.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/routes/transparent-proxy-terminating-gateway-destinations-only.latest.golden b/agent/xds/testdata/routes/transparent-proxy-terminating-gateway-destinations-only.latest.golden new file mode 100644 index 000000000..5eb099010 --- /dev/null +++ b/agent/xds/testdata/routes/transparent-proxy-terminating-gateway-destinations-only.latest.golden @@ -0,0 +1,53 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "virtualHosts": [ + { + "name": "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "destination~external-IP-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "validateClusters": true + }, + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "virtualHosts": [ + { + "name": "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "domains": [ + "*" + ], + "routes": 
[ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "destination~external-hostname-HTTP.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "validateClusters": true + } + ], + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file From 0ffcbf020e541c2770d00cbdda57d35c145939a4 Mon Sep 17 00:00:00 2001 From: Michele Degges Date: Thu, 14 Jul 2022 13:02:13 -0700 Subject: [PATCH 013/107] [CI-only] Support fossa scanning (#13694) --- .release/ci.hcl | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.release/ci.hcl b/.release/ci.hcl index e422a9ce2..ceb11f759 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -178,6 +178,15 @@ event "promote-dev-docker" { } } +event "fossa-scan" { + depends = ["promote-dev-docker"] + action "fossa-scan" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "fossa-scan" + } +} + ## These are promotion and post-publish events ## they should be added to the end of the file after the verify event stanza. From 17565a4fcaae3b3e648499ec12cecdbf677d735e Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 15 Jul 2022 10:07:07 -0400 Subject: [PATCH 014/107] Enable partition support for peering establishment (#13772) Prior to this the dialing side of the peering would only ever work within the default partition. This commit allows properly parsing the partition field out of the API struct request body, query param and header. --- agent/peering_endpoint.go | 8 ++++++++ api/peering.go | 6 ++++-- proto/pbpeering/peering.gen.go | 2 ++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/agent/peering_endpoint.go b/agent/peering_endpoint.go index 22f4fc1ae..0d120830e 100644 --- a/agent/peering_endpoint.go +++ b/agent/peering_endpoint.go @@ -130,6 +130,14 @@ func (s *HTTPHandlers) PeeringEstablish(resp http.ResponseWriter, req *http.Requ return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeeringToken is required in the payload when establishing a peering."} } + var entMeta acl.EnterpriseMeta + if err := s.parseEntMetaPartition(req, &entMeta); err != nil { + return nil, err + } + if args.Partition == "" { + args.Partition = entMeta.PartitionOrEmpty() + } + out, err := s.agent.rpcClientPeering.Establish(req.Context(), args) if err != nil { return nil, err diff --git a/api/peering.go b/api/peering.go index 0cfbae9fd..b880380de 100644 --- a/api/peering.go +++ b/api/peering.go @@ -98,8 +98,10 @@ type PeeringEstablishRequest struct { PeerName string // The peering token returned from the peer's GenerateToken endpoint. PeeringToken string `json:",omitempty"` - Datacenter string `json:",omitempty"` - Token string `json:",omitempty"` + // Partition to be peered. 
+ Partition string `json:",omitempty"` + Datacenter string `json:",omitempty"` + Token string `json:",omitempty"` // Meta is a mapping of some string value to any other string value Meta map[string]string `json:",omitempty"` } diff --git a/proto/pbpeering/peering.gen.go b/proto/pbpeering/peering.gen.go index f2194596f..3f3b400b4 100644 --- a/proto/pbpeering/peering.gen.go +++ b/proto/pbpeering/peering.gen.go @@ -10,6 +10,7 @@ func EstablishRequestToAPI(s *EstablishRequest, t *api.PeeringEstablishRequest) } t.PeerName = s.PeerName t.PeeringToken = s.PeeringToken + t.Partition = s.Partition t.Datacenter = s.Datacenter t.Token = s.Token t.Meta = s.Meta @@ -20,6 +21,7 @@ func EstablishRequestFromAPI(t *api.PeeringEstablishRequest, s *EstablishRequest } s.PeerName = t.PeerName s.PeeringToken = t.PeeringToken + s.Partition = t.Partition s.Datacenter = t.Datacenter s.Token = t.Token s.Meta = t.Meta From 7ae0c69729fff03e595a8edc07b68c982a542257 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 15 Jul 2022 10:51:38 -0400 Subject: [PATCH 015/107] Use Node Name for peering healthSnapshot instead of ID (#13773) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A Node ID is not a required field with Consul’s data model. Therefore we cannot reliably expect all uses to have it. However the node name is required and must be unique so its equally as good of a key for the internal healthSnapshot node tracking. --- .../services/peerstream/health_snapshot.go | 16 ++++++++++------ .../services/peerstream/health_snapshot_test.go | 16 ++++++++-------- .../services/peerstream/replication.go | 8 ++++---- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/agent/grpc-external/services/peerstream/health_snapshot.go b/agent/grpc-external/services/peerstream/health_snapshot.go index c6cb3243b..a9f676827 100644 --- a/agent/grpc-external/services/peerstream/health_snapshot.go +++ b/agent/grpc-external/services/peerstream/health_snapshot.go @@ -8,7 +8,11 @@ import ( // healthSnapshot represents a normalized view of a set of CheckServiceNodes // meant for easy comparison to aid in differential synchronization type healthSnapshot struct { - Nodes map[types.NodeID]*nodeSnapshot + // Nodes is a map of a node name to a nodeSnapshot. Ideally we would be able to use + // the types.NodeID and assume they are UUIDs for the map key but Consul doesn't + // require a NodeID. Therefore we must key off of the only bit of ID material + // that is required which is the node name. 
+ Nodes map[string]*nodeSnapshot } type nodeSnapshot struct { @@ -40,20 +44,20 @@ func newHealthSnapshot(all []structs.CheckServiceNode, partition, peerName strin } snap := &healthSnapshot{ - Nodes: make(map[types.NodeID]*nodeSnapshot), + Nodes: make(map[string]*nodeSnapshot), } for _, instance := range all { - if instance.Node.ID == "" { - panic("TODO(peering): data should always have a node ID") + if instance.Node.Node == "" { + panic("TODO(peering): data should always have a node name") } - nodeSnap, ok := snap.Nodes[instance.Node.ID] + nodeSnap, ok := snap.Nodes[instance.Node.Node] if !ok { nodeSnap = &nodeSnapshot{ Node: instance.Node, Services: make(map[structs.ServiceID]*serviceSnapshot), } - snap.Nodes[instance.Node.ID] = nodeSnap + snap.Nodes[instance.Node.Node] = nodeSnap } if instance.Service.ID == "" { diff --git a/agent/grpc-external/services/peerstream/health_snapshot_test.go b/agent/grpc-external/services/peerstream/health_snapshot_test.go index 74731b55f..afd83f220 100644 --- a/agent/grpc-external/services/peerstream/health_snapshot_test.go +++ b/agent/grpc-external/services/peerstream/health_snapshot_test.go @@ -69,8 +69,8 @@ func TestHealthSnapshot(t *testing.T) { }, }, expect: &healthSnapshot{ - Nodes: map[types.NodeID]*nodeSnapshot{ - "abc-123": { + Nodes: map[string]*nodeSnapshot{ + "abc": { Node: newNode("abc-123", "abc", "my-peer"), Services: map[structs.ServiceID]*serviceSnapshot{ structs.NewServiceID("xyz-123", nil): { @@ -88,14 +88,14 @@ func TestHealthSnapshot(t *testing.T) { name: "multiple", in: []structs.CheckServiceNode{ { - Node: newNode("abc-123", "abc", ""), + Node: newNode("", "abc", ""), Service: newService("xyz-123", 8080, ""), Checks: structs.HealthChecks{ newCheck("abc", "xyz-123", ""), }, }, { - Node: newNode("abc-123", "abc", ""), + Node: newNode("", "abc", ""), Service: newService("xyz-789", 8181, ""), Checks: structs.HealthChecks{ newCheck("abc", "xyz-789", ""), @@ -110,9 +110,9 @@ func TestHealthSnapshot(t *testing.T) { }, }, expect: &healthSnapshot{ - Nodes: map[types.NodeID]*nodeSnapshot{ - "abc-123": { - Node: newNode("abc-123", "abc", "my-peer"), + Nodes: map[string]*nodeSnapshot{ + "abc": { + Node: newNode("", "abc", "my-peer"), Services: map[structs.ServiceID]*serviceSnapshot{ structs.NewServiceID("xyz-123", nil): { Service: newService("xyz-123", 8080, "my-peer"), @@ -128,7 +128,7 @@ func TestHealthSnapshot(t *testing.T) { }, }, }, - "def-456": { + "def": { Node: newNode("def-456", "def", "my-peer"), Services: map[structs.ServiceID]*serviceSnapshot{ structs.NewServiceID("xyz-456", nil): { diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go index f9f5ce76b..7ec58b5f4 100644 --- a/agent/grpc-external/services/peerstream/replication.go +++ b/agent/grpc-external/services/peerstream/replication.go @@ -290,8 +290,8 @@ func (s *Server) handleUpdateService( deletedNodeChecks = make(map[nodeCheckTuple]struct{}) ) for _, csn := range storedInstances { - if _, ok := snap.Nodes[csn.Node.ID]; !ok { - unusedNodes[string(csn.Node.ID)] = struct{}{} + if _, ok := snap.Nodes[csn.Node.Node]; !ok { + unusedNodes[csn.Node.Node] = struct{}{} // Since the node is not in the snapshot we can know the associated service // instance is not in the snapshot either, since a service instance can't @@ -316,7 +316,7 @@ func (s *Server) handleUpdateService( // Delete the service instance if not in the snapshot. 
sid := csn.Service.CompoundServiceID() - if _, ok := snap.Nodes[csn.Node.ID].Services[sid]; !ok { + if _, ok := snap.Nodes[csn.Node.Node].Services[sid]; !ok { err := s.Backend.CatalogDeregister(&structs.DeregisterRequest{ Node: csn.Node.Node, ServiceID: csn.Service.ID, @@ -335,7 +335,7 @@ func (s *Server) handleUpdateService( // Reconcile checks. for _, chk := range csn.Checks { - if _, ok := snap.Nodes[csn.Node.ID].Services[sid].Checks[chk.CheckID]; !ok { + if _, ok := snap.Nodes[csn.Node.Node].Services[sid].Checks[chk.CheckID]; !ok { // Checks without a ServiceID are node checks. // If the node exists but the check does not then the check was deleted. if chk.ServiceID == "" { From 28bf578b2d19773869d886caa9fce46d3d69c272 Mon Sep 17 00:00:00 2001 From: John Murret Date: Fri, 15 Jul 2022 10:35:42 -0600 Subject: [PATCH 016/107] Made changes based on Adams suggestions (#13490) * Made changes based on Adams suggestions * updating list layout in systems integration guide. updating wan federation docs. * fixing env vars on systems integration page * fixing h3 to h2 on enterprise license page * Changed `The following steps will be performed` to `Complete the following steps` * Replaced `These steps will be repeated for each datacenter` with `Repeat the following steps for each datacenter in the cluster` * Emphasizing that kv2 secrets only need to be stored once. * Move the sentence indicating where the vault path maps to the helm chart out of the -> Note callout * remaining suggestions * Removing store the secret in Vault from server-tls page * Making the Bootstrapping the Server PKI Engine sections the same on server-tls and webhook-cert pages * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Updating VAULT_ADDR on systems-integration to get it out of the shell. * Updating intro paragraph of Overview on systems-integration.mdx to what Adamsuggested. * Putting the GKE, AKS, AKS info into tabs on the systems integration page. * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- .../data-integration/bootstrap-token.mdx | 24 +- .../vault/data-integration/connect-ca.mdx | 18 +- .../data-integration/enterprise-license.mdx | 20 +- .../vault/data-integration/gossip.mdx | 26 +- .../vault/data-integration/index.mdx | 11 +- .../data-integration/partition-token.mdx | 22 +- .../data-integration/replication-token.mdx | 22 +- .../vault/data-integration/server-tls.mdx | 207 ++++++++-------- .../snapshot-agent-config.mdx | 24 +- .../vault/data-integration/webhook-certs.mdx | 232 ++++++++---------- .../vault/systems-integration.mdx | 124 +++++----- .../k8s/installation/vault/wan-federation.mdx | 38 ++- 12 files changed, 365 insertions(+), 403 deletions(-) diff --git a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx index aa132e4b3..3d5a3d39a 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx @@ -9,14 +9,13 @@ description: >- This topic describes how to configure the Consul Helm chart to use an ACL bootstrap token stored in Vault. 
## Overview -To use an ACL bootstrap token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: - -### One time setup in Vault +To use an ACL bootstrap token stored in Vault, follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section. +Complete the following steps once: 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -24,20 +23,20 @@ To use an ACL bootstrap token stored in Vault, we will follow the steps outlined Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). -## One time setup in Vault -### Generate and Store the Secret in Vault -First, generate and store the ACL bootstrap token in Vault: +## Store the Secret in Vault + +First, generate and store the ACL bootstrap token in Vault. You will only need to perform this action once: ```shell-session $ vault kv put secret/consul/bootstrap-token token="$(uuidgen | tr '[:upper:]' '[:lower:]')" ``` -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policy --> **Note:** The secret path referenced by the Vault Policy below will be your `global.acls.bootstrapToken.secretName` Helm value. +Next, you will need to create a Vault policy that allows read access to this secret. -Next, you will need to create a Vault policy that allows read access to this secret: +The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.acls.bootstrapToken.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). 
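
As an illustrative sketch (assuming the KV v2 secret path `secret/consul/bootstrap-token` created above, which KV v2 serves for reads under `secret/data/`), the `bootstrap-token-policy.hcl` referenced below could grant that read access like so:

```HCL
# Hypothetical read-only policy for the ACL bootstrap token stored at secret/consul/bootstrap-token.
# KV v2 exposes secret reads under the data/ sub-path, hence secret/data/consul/bootstrap-token.
path "secret/data/consul/bootstrap-token" {
  capabilities = ["read"]
}
```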
@@ -55,8 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write bootstrap-token-policy bootstrap-token-policy.hcl ``` -## Setup per Consul datacenter -### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access +## Create Vault Authorization Roles for Consul Next, you will create Kubernetes auth roles for the Consul `server-acl-init` container that runs as part of the Consul server statefulset: @@ -75,7 +73,7 @@ you can run the following `helm template` command with your Consul on Kubernetes $ helm template --release-name ${RELEASE_NAME} -s templates/server-acl-init-serviceaccount.yaml hashicorp/consul ``` -### Update the Consul on Kubernetes helm chart +## Update Consul on Kubernetes Helm chart Now that you have configured Vault, you can configure the Consul Helm chart to use the ACL bootstrap token in Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx b/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx index 121bb3ee2..46f53ec97 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx @@ -14,12 +14,12 @@ Consul allows using Kubernetes auth methods to configure Connect CA. This allows for automatic token rotation once the renewal is no longer possible. ## Overview -To use an Vault as the Service Mesh Certificate Provider on Kubernetes, we will need to modify the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: +To use Vault as the service mesh certificate provider on Kubernetes, you will complete a modified version of the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section. -### One time setup in Vault +Complete the following steps once: 1. Create a Vault policy that authorizes the desired level of access to the secret. -### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -28,20 +28,14 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes, 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). -## One time setup in Vault -### Store the secret in Vault - -This step is not valid to this use case as we are not storing any secrets for Service Mesh certificate, and we instead are Leveraging Vault CA as a provider to mint certificates on an ongoing basis. - -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policy To configure [Vault as the provider](/docs/connect/ca/vault) for the Consul service mesh certificates, you will first need to decide on the type of policy that is suitable for you. To see the permissions that Consul would need in Vault, please see [Vault ACL policies](/docs/connect/ca/vault#vault-acl-policies) documentation. 
-## Setup per Consul datacenter -### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access +## Create Vault Authorization Roles for Consul Next, you will create Kubernetes auth roles for the Consul servers: @@ -60,7 +54,7 @@ you can run: $ helm template --release-name ${RELEASE_NAME} --show-only templates/server-serviceaccount.yaml hashicorp/consul ``` -### Update the Consul on Kubernetes helm chart +## Update Consul on Kubernetes Helm chart Now you can configure the Consul Helm chart to use Vault as the Connect CA provider: diff --git a/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx b/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx index 4333a7162..08d7e16f1 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx @@ -9,13 +9,13 @@ description: >- This topic describes how to configure the Consul Helm chart to use an enterprise license stored in Vault. ## Overview -To use an enterprise license stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: +Complete the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section to use an enterprise license stored in Vault. -### One time setup in Vault +Complete the following steps once: 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -24,8 +24,7 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes, 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). -## One time setup in Vault -### Store the Secret in Vault +## Store the Secret in Vault First, store the enterprise license in Vault: @@ -33,11 +32,11 @@ First, store the enterprise license in Vault: $ vault kv put secret/consul/license key="" ``` -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policy --> **Note:** The secret path referenced by the Vault Policy below will be your `global.enterpriseLicense.secretName` Helm value. +Next, you will need to create a policy that allows read access to this secret. -Next, you will need to create a policy that allows read access to this secret: +The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.enterpriseLicense.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). 


@@ -55,8 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command:
 $ vault policy write license-policy license-policy.hcl
 ```
 
-## Setup per Consul datacenter
-### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access
+## Create Vault Authorization Roles for Consul
 
 Next, you will create Kubernetes auth roles for the Consul server and client:
 
@@ -89,7 +87,7 @@ you can run the following `helm template` commands with your Consul on Kubernete
 $ helm template --release-name ${RELEASE_NAME} -s templates/client-serviceaccount.yaml hashicorp/consul
 ```
 
-### Update the Consul on Kubernetes helm chart.
+## Update Consul on Kubernetes Helm chart.
 
 Now that you have configured Vault, you can configure the Consul Helm chart to use the enterprise license in Vault:
 
diff --git a/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx b/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx
index 57480a9ec..828b19307 100644
--- a/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx
+++ b/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx
@@ -7,14 +7,16 @@ description: >-
 
 # Storing Gossip Encryption Key in Vault
 
-## Overview
-To use a gossip encryption key stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section:
+This topic describes how to configure the Consul Helm chart to use a gossip encryption key stored in Vault.
 
-### One time setup in Vault
+## Overview
+Complete the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section to use a gossip encryption key stored in Vault.
+
+Complete the following steps once:
 1. Store the secret in Vault.
 1. Create a Vault policy that authorizes the desired level of access to the secret.
 
-### Setup per Consul datacenter
+Repeat the following steps for each datacenter in the cluster:
 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access.
 1. Update the Consul on Kubernetes helm chart.
 
@@ -23,18 +25,17 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes,
 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault).
 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault).
 
-## One time setup in Vault
-### Store the Secret in Vault
-First, generate and store the gossip key in Vault:
+## Store the Secret in Vault
+First, generate and store the gossip key in Vault. You will only need to perform this action once:
 
 ```shell-session
 $ vault kv put secret/consul/gossip key="$(consul keygen)"
 ```
 
-### Create a Vault policy that authorizes the desired level of access to the secret
+## Create Vault policy
 
--> **Note:** The secret path referenced by the Vault Policy below will be your `global.gossipEncryption.secretName` Helm value.
+Next, create a policy that allows read access to this secret. 
-Next, we will need to create a policy that allows read access to this secret: +The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.gossipEncryption.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). @@ -52,8 +53,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write gossip-policy gossip-policy.hcl ``` -## Setup per Consul datacenter -### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access +## Create Vault Authorization Roles for Consul Next, we will create Kubernetes auth roles for the Consul server and client: @@ -86,7 +86,7 @@ you can run the following `helm template` commands with your Consul on Kubernete $ helm template --release-name ${RELEASE_NAME} -s templates/client-serviceaccount.yaml hashicorp/consul ``` -### Update the Consul on Kubernetes helm chart +## Update Consul on Kubernetes Helm chart Now that we've configured Vault, you can configure the Consul Helm chart to use the gossip key in Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/index.mdx b/website/content/docs/k8s/installation/vault/data-integration/index.mdx index 5007fcfe9..a7669f549 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/index.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/index.mdx @@ -13,13 +13,13 @@ This topic describes an overview of how to configure Vault and Consul in order t ### General Integration Steps -Generally, for each secret you wish to store in Vault, the process to integrate the data between Vault and Consul on Kubernetes is: +You must complete two general procedures for each secret you wish to store in Vault. -#### One time setup in Vault +Complete the following steps once: 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -#### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -31,14 +31,13 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes, Following the general integration steps, a more detailed workflow for integration of the [Gossip encryption key](/docs/k8s/installation/vault/data-integration/gossip) with the Vault Secrets backend would like the following: -#### One time setup in Vault +Complete the following steps once: 1. Store the secret in Vault. - Save the gossip encryption key in Vault at the path `secret/consul/gossip`. 1. Create a Vault policy that authorizes the desired level of access to the secret. - Create a Vault policy that you name `gossip-policy` which allows `read` access to the path `secret/consul/gossip`. -#### Setup per Consul datacenter - +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 
- Both Consul servers and Consul clients need access to the gossip encryption key, so you create two Vault Kubernetes: - A role called `consul-server` that maps the Kubernetes namespace and service account name for your consul servers to the `gossip-policy` created in [step 2](#one-time-setup-in-vault) of One time setup in Vault. diff --git a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx index 2e7c7e68c..98c764fc5 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx @@ -10,13 +10,13 @@ description: >- This topic describes how to configure the Consul Helm chart to use an ACL partition token stored in Vault. ## Overview -To use an ACL partition token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: +Complete the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section to use an ACL partition token stored in Vault. -### One time setup in Vault +Complete the following steps once: 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -25,20 +25,19 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes, 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). -## One time setup in Vault -### Generate and Store the Secret in Vault +## Store the Secret in Vault -First, generate and store the ACL partition token in Vault: +First, generate and store the ACL partition token in Vault. You will only need to perform this action once: ```shell-session $ vault kv put secret/consul/partition-token token="$(uuidgen | tr '[:upper:]' '[:lower:]')" ``` -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policy --> **Note:** The secret path referenced by the Vault Policy below will be your `global.acls.partitionToken.secretName` Helm value. +Next, you will need to create a policy that allows read access to this secret. -Next, you will need to create a policy that allows read access to this secret: +The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.acls.partitionToken.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). 
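
As an illustrative sketch (assuming the KV v2 path `secret/consul/partition-token` used above), the `partition-token-policy.hcl` referenced below could grant that read access like so:

```HCL
# Hypothetical read-only policy for the ACL partition token; KV v2 reads go through the data/ sub-path.
path "secret/data/consul/partition-token" {
  capabilities = ["read"]
}
```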
@@ -56,8 +55,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write partition-token-policy partition-token-policy.hcl ``` -## Setup per Consul datacenter -### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access +## Create Vault Authorization Roles for Consul Next, you will create Kubernetes auth roles for the Consul `server-acl-init` job: @@ -76,7 +74,7 @@ you can run the following `helm template` command with your Consul on Kubernetes $ helm template --release-name ${RELEASE_NAME} -s templates/server-acl-init-serviceaccount.yaml hashicorp/consul ``` -### Update the Consul on Kubernetes helm chart +## Update Consul on Kubernetes Helm chart Now that you have configured Vault, you can configure the Consul Helm chart to use the ACL partition token key in Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx index ed40fdea5..f17c3fcb7 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx @@ -9,13 +9,13 @@ description: >- This topic describes how to configure the Consul Helm chart to use an ACL replication token stored in Vault. ## Overview -To use an ACL replication token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: +To use an ACL replication token stored in Vault, follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section. -### One time setup in Vault +Complete the following steps once: 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -24,20 +24,19 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes, 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). -## One time setup in Vault -### Generate and Store the Secret in Vault +## Store the Secret in Vault -First, generate and store the ACL replication token in Vault: +First, generate and store the ACL replication token in Vault. You will only need to perform this action once: ```shell-session $ vault kv put secret/consul/replication-token token="$(uuidgen | tr '[:upper:]' '[:lower:]')" ``` -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policy --> **Note:** The secret path referenced by the Vault Policy below will be your `global.acls.replicationToken.secretName` Helm value. +Next, you will need to create a policy that allows read access to this secret. 
-Next, you will need to create a policy that allows read access to this secret:
+The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.acls.replicationToken.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). 



@@ -55,8 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command:
 $ vault policy write replication-token-policy replication-token-policy.hcl
 ```
 
-## Setup per Consul datacenter
-### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access
+## Create Vault Authorization Roles for Consul
 
 Next, you will create Kubernetes auth roles for the Consul `server-acl-init` job:
 
@@ -75,7 +73,7 @@ you can run the following `helm template` command with your Consul on Kubernetes
 $ helm template --release-name ${RELEASE_NAME} -s templates/server-acl-init-serviceaccount.yaml hashicorp/consul
 ```
 
-### Update the Consul on Kubernetes helm chart
+## Update Consul on Kubernetes Helm chart
 
 Now that you have configured Vault, you can configure the Consul Helm chart to use the ACL replication token key in Vault:
 
diff --git a/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx b/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx
index 38626c9dd..382a53ed9 100644
--- a/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx
+++ b/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx
@@ -8,12 +8,12 @@ description: >-
 # Vault as the Server TLS Certificate Provider on Kubernetes
 
 ## Overview
-To use an Vault as the Server TLS Certificate Provider on Kubernetes, we will need to modify the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section:
+To use Vault as the server TLS certificate provider on Kubernetes, complete a modified version of the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section.
 
-### One time setup in Vault
+Complete the following steps once:
 1. Create a Vault policy that authorizes the desired level of access to the secret.
 
-### Setup per Consul datacenter
+Repeat the following steps for each datacenter in the cluster:
 1. (Added) Configure allowed domains for PKI certificates
 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access.
 1. Update the Consul on Kubernetes helm chart.
@@ -24,11 +24,10 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes,
 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault).
 3. Complete the [Bootstrapping the PKI Engine](#bootstrapping-the-pki-engine) section.
 
-### Bootstrapping the PKI Engine
+## Bootstrapping the PKI Engine
 
-First, we need to bootstrap the Vault cluster by enabling and configuring the PKI Secrets Engine to be able to serve
-TLS certificates to Consul. The process can be as simple as the following, or more complicated such as in this [example](https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-secure-tls)
-which also uses an intermediate signing authority.
+Issue the following commands to enable and configure the PKI Secrets Engine to serve
+TLS certificates to Consul. 
* Enable the PKI Secrets Engine: @@ -51,142 +50,128 @@ which also uses an intermediate signing authority. common_name="dc1.consul" \ ttl=87600h ``` -## One time setup in Vault -### Store the secret in Vault - -This step is not valid to this use case because we are not storing a single secret. We are configuring Vault as a provider to mint certificates on an ongoing basis. - -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policies To use Vault to issue Server TLS certificates, you will need to create the following: -1. Vault Policies that will allow the Consul server to access the certificate issuing url. -1. Vault Policies that will allow the Consul components, e.g. ingress gateways, controller, to access the CA url. +1. Create a policy that allows `["create", "update"]` access to the + [certificate issuing URL](https://www.vaultproject.io/api/secret/pki#generate-certificate) so the Consul servers can + fetch a new certificate/key pair. -#### Create Vault Policies for the Server TLS Certificates + The path to the secret referenced in the `path` resource is the same value that you will configure in the `server.serverCert.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). --> **Note:** The PKI secret path referenced by the Vault Policy below will be your `server.serverCert.secretName` Helm value. + -Next we will create a policy that allows `["create", "update"]` access to the -[certificate issuing URL](https://www.vaultproject.io/api/secret/pki#generate-certificate) so the Consul servers can -fetch a new certificate/key pair. + ```HCL + path "pki/issue/consul-server" { + capabilities = ["create", "update"] + } + ``` - + -```HCL -path "pki/issue/consul-server" { - capabilities = ["create", "update"] -} -``` +1. Apply the Vault policy by issuing the `vault policy write` CLI command: - + ```shell-session + $ vault policy write consul-server consul-server-policy.hcl + ``` -Apply the Vault policy by issuing the `vault policy write` CLI command: - -```shell-session -$ vault policy write consul-server consul-server-policy.hcl -``` - -#### Create Vault Policies for the CA URL - -Next, we will create a policy that allows `["read"]` access to the [CA URL](https://www.vaultproject.io/api/secret/pki#read-certificate), +1. Create a policy that allows `["read"]` access to the [CA URL](https://www.vaultproject.io/api/secret/pki#read-certificate), this is required for the Consul components to communicate with the Consul servers in order to fetch their auto-encryption certificates. + + The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.tls.caCert.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). - + -```HCL -path "pki/cert/ca" { - capabilities = ["read"] -} -``` + ```HCL + path "pki/cert/ca" { + capabilities = ["read"] + } + ``` - + -```shell-session -$ vault policy write ca-policy ca-policy.hcl -``` + ```shell-session + $ vault policy write ca-policy ca-policy.hcl + ``` --> **Note:** The PKI secret path referenced by the above Policy will be your `global.tls.caCert.secretName` Helm value. +1. Configure allowed domains for PKI certificates. 
-## Setup per Consul datacenter -### Configure allowed domains for PKI certificates + Next, a Vault role for the PKI engine will set the default certificate issuance parameters: -Next, a Vault role for the PKI engine will set the default certificate issuance parameters: + ```shell-session + $ vault write pki/roles/consul-server \ + allowed_domains="" \ + allow_subdomains=true \ + allow_bare_domains=true \ + allow_localhost=true \ + generate_lease=true \ + max_ttl="720h" + ``` -```shell-session -$ vault write pki/roles/consul-server \ - allowed_domains="" \ - allow_subdomains=true \ - allow_bare_domains=true \ - allow_localhost=true \ - generate_lease=true \ - max_ttl="720h" -``` + To generate the `` use the following script as a template: -To generate the `` use the following script as a template: + ```shell-session + #!/bin/sh -```shell-session -#!/bin/sh + # NAME is set to either the value from `global.name` from your Consul K8s value file, or your $HELM_RELEASE_NAME-consul + export NAME=consulk8s + # NAMESPACE is where the Consul on Kubernetes is installed + export NAMESPACE=consul + # DATACENTER is the value of `global.datacenter` from your Helm values config file + export DATACENTER=dc1 -# NAME is set to either the value from `global.name` from your Consul K8s value file, or your $HELM_RELEASE_NAME-consul -export NAME=consulk8s -# NAMESPACE is where the Consul on Kubernetes is installed -export NAMESPACE=consul -# DATACENTER is the value of `global.datacenter` from your Helm values config file -export DATACENTER=dc1 + echo allowed_domains=\"$DATACENTER.consul, $NAME-server, $NAME-server.$NAMESPACE, $NAME-server.$NAMESPACE.svc\" + ``` -echo allowed_domains=\"$DATACENTER.consul, $NAME-server, $NAME-server.$NAMESPACE, $NAME-server.$NAMESPACE.svc\" -``` +1. Finally, Kubernetes auth roles need to be created for servers, clients, and components. -### Link the Vault policies to Consul workloads -Create three Vault auth roles, one for the Consul servers, one for the Consul clients, and one for Consul components, that link the policy to each Consul workload on Kubernetes service account that requires access. + Role for Consul servers: + ```shell-session + $ vault write auth/kubernetes/role/consul-server \ + bound_service_account_names= \ + bound_service_account_namespaces= \ + policies=consul-server \ + ttl=1h + ``` -Role for Consul servers: -```shell-session -$ vault write auth/kubernetes/role/consul-server \ - bound_service_account_names= \ - bound_service_account_namespaces= \ - policies=consul-server \ - ttl=1h -``` + To find out the service account name of the Consul server, + you can run: -To find out the service account name of the Consul server, -you can run: + ```shell-session + $ helm template --release-name ${RELEASE_NAME} --show-only templates/server-serviceaccount.yaml hashicorp/consul + ``` -```shell-session - $ helm template --release-name ${RELEASE_NAME} --show-only templates/server-serviceaccount.yaml hashicorp/consul -``` + Role for Consul clients: -Role for Consul clients: + ```shell-session + $ vault write auth/kubernetes/role/consul-client \ + bound_service_account_names= \ + bound_service_account_namespaces=default \ + policies=ca-policy \ + ttl=1h + ``` -```shell-session -$ vault write auth/kubernetes/role/consul-client \ - bound_service_account_names= \ - bound_service_account_namespaces=default \ - policies=ca-policy \ - ttl=1h -``` + To find out the service account name of the Consul client, use the command below. 
+ ```shell-session + $ helm template --release-name ${RELEASE_NAME} --show-only templates/client-serviceaccount.yaml hashicorp/consul + ``` -To find out the service account name of the Consul client, use the command below. -```shell-session - $ helm template --release-name ${RELEASE_NAME} --show-only templates/client-serviceaccount.yaml hashicorp/consul -``` + Role for CA components: + ```shell-session + $ vault write auth/kubernetes/role/consul-ca \ + bound_service_account_names="*" \ + bound_service_account_namespaces= \ + policies=ca-policy \ + ttl=1h + ``` -Role for CA components: -```shell-session -$ vault write auth/kubernetes/role/consul-ca \ - bound_service_account_names="*" \ - bound_service_account_namespaces= \ - policies=ca-policy \ - ttl=1h -``` + The above Vault Roles will now be your Helm values for `global.secretsBackend.vault.consulServerRole` and + `global.secretsBackend.vault.consulCARole` respectively. -The above Vault Roles will now be your Helm values for `global.secretsBackend.vault.consulServerRole` and -`global.secretsBackend.vault.consulCARole` respectively. +## Update Consul on Kubernetes Helm chart -### Update the Consul on Kubernetes helm chart - -Now that we've configured Vault, you can configure the Consul Helm chart to -use the Server TLS certificates from Vault: +Next, configure the Consul Helm chart to +use the server TLS certificates from Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx b/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx index b8e4a1646..6a0d913cd 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx @@ -9,15 +9,13 @@ description: >- This topic describes how to configure the Consul Helm chart to use a snapshot agent config stored in Vault. ## Overview -To use an ACL replication token stored in Vault, we will follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: - -### One time setup in Vault +To use an ACL replication token stored in Vault, follow the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section. +Complete the following steps once: 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. -### Setup per Consul datacenter - +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. @@ -26,8 +24,7 @@ Prior to setting up the data integration between Vault and Consul on Kubernetes, 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 
-## One time setup in Vault -### Store the Secret in Vault +## Store the Secret in Vault First, store the snapshot agent config in Vault: @@ -35,11 +32,11 @@ First, store the snapshot agent config in Vault: $ vault kv put secret/consul/snapshot-agent-config key="" ``` -### Create a Vault policy that authorizes the desired level of access to the secret +## Create Vault policy --> **Note:** The secret path referenced by the Vault Policy below will be your `client.snapshotAgent.configSecret.secretName` Helm value. +Next, you will need to create a policy that allows read access to this secret. -Next, you will need to create a policy that allows read access to this secret: +The path to the secret referenced in the `path` resource is the same values that you will configure in the `client.snapshotAgent.configSecret.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). @@ -57,8 +54,7 @@ Apply the Vault policy by issuing the `vault policy write` CLI command: $ vault policy write snapshot-agent-config-policy snapshot-agent-config-policy.hcl ``` -## Setup per Consul datacenter -### Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access +## Create Vault Authorization Roles for Consul Next, you will create a Kubernetes auth role for the Consul snapshot agent: @@ -77,10 +73,10 @@ you can run the following `helm template` command with your Consul on Kubernetes $ helm template --release-name ${RELEASE_NAME} -s templates/client-snapshot-agent-serviceaccount.yaml hashicorp/consul ``` -### Update the Consul on Kubernetes helm chart +## Update Consul on Kubernetes Helm chart Now that you have configured Vault, you can configure the Consul Helm chart to -use the snapshot agent config in Vault: +use the snapshot agent configuration in Vault: diff --git a/website/content/docs/k8s/installation/vault/data-integration/webhook-certs.mdx b/website/content/docs/k8s/installation/vault/data-integration/webhook-certs.mdx index 028ebb496..4615a040c 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/webhook-certs.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/webhook-certs.mdx @@ -21,7 +21,7 @@ When Vault is configured as the controller and connect inject Webhook Certificat To use Vault as the controller and connect inject Webhook Certificate Provider, we will need to modify the steps outlined in the [Data Integration](/docs/k8s/installation/vault/data-integration) section: -### Setup per Consul datacenter +These following steps will be repeated for each datacenter: 1. Create a Vault policy that authorizes the desired level of access to the secret. 1. (Added) Create Vault PKI roles for controller and connect inject each that establish the domains that each is allowed to issue certificates for. 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. @@ -33,11 +33,9 @@ Complete the following prerequisites prior to implementing the integration descr 1. You should be familiar with the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 1. Configure [Vault as the Server TLS Certificate Provider on Kubernetes](/docs/k8s/installation/vault/data-integration/server-tls) 1. 
Configure [Vault as the Service Mesh Certificate Provider on Kubernetes](/docs/k8s/installation/vault/data-integration/connect-ca) -1. Complete the [Bootstrapping the PKI Engine for Controller and Connect Inject Webhooks](#bootstrapping-the-pki-engine-for-controller-and-connect-inject-webhooks) section. -### Bootstrapping the PKI Engine for Controller and Connect Inject Webhooks - -The first step is to bootstrap the Vault cluster. Issue the following commands to enable and configure the PKI Secrets Engine to serve TLS certificates for the controller and connect inject webhooks: +## Bootstrapping the PKI Engines +Issue the following commands to enable and configure the PKI Secrets Engine to serve TLS certificates for the controller and connect inject webhooks: * Mount the PKI Secrets Engine for each: @@ -72,138 +70,124 @@ The first step is to bootstrap the Vault cluster. Issue the following commands t common_name="-connect-injector" \ ttl=87600h ``` -## Setup per Consul datacenter -You will need to preform the following steps for each datacenter that you would like to manage controller and connect inject webhook certificates in Vault. You will want to take care to create different names per datacenter for every pki mount, role, and policy. +## Create Vault Policies +1. Create a policy that allows `["create", "update"]` access to the +[certificate issuing URL](https://www.vaultproject.io/api/secret/pki#generate-certificate) so Consul controller and connect inject can fetch a new certificate/key pair and provide it to the Kubernetes `mutatingwebhookconfiguration`. -### Create a Vault policy that authorizes the desired level of access to the secret -To use Vault to issue controller or connect inject webhook certificates, you will need to create the Vault policies that will allow either controller or connect inject to access its respective certificate-issuing URL. + The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.secretsBackend.vault.controller.tlsCert.secretName` and `global.secretsBackend.vault.connectInject.tlsCert.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). -#### Create Vault Policies for the Controller and Connect Inject Webhook Certificates + ```shell-session + $ vault policy write controller-tls-policy - < **Note:** The PKI secret paths referenced by the Vault Policies below will be your `global.secretsBackend.vault.controller.tlsCert.secretName` and `global.secretsBackend.vault.connectInject.tlsCert.secretName` Helm values respectively. + ```shell-session + $ vault policy write connect-inject-policy - <` for each use the following script as a template: + + ```shell-session + #!/bin/sh + + # NAME is set to either the value from `global.name` from your Consul K8s value file, or your $HELM_RELEASE_NAME-consul + export NAME=consulk8s + # NAMESPACE is where the Consul on Kubernetes is installed + export NAMESPACE=consul + # DATACENTER is the value of `global.datacenter` from your Helm values config file + export DATACENTER=dc1 + + echo allowed_domains_controller=\"${NAME}-controller-webhook,${NAME}-controller-webhook.${NAMESPACE},${NAME}-controller-webhook.${NAMESPACE}.svc,${NAME}-controller-webhook.${NAMESPACE}.svc.cluster.local\"" + + echo allowed_domains_connect_inject=\"${NAME}-connect-injector,${NAME}-connect-injector.${NAMESPACE},${NAME}-connect-injector.${NAMESPACE}.svc,${NAME}-connect-injector.${NAMESPACE}.svc.cluster.local\"" + ``` + +1. 
Finally, Kubernetes auth roles need to be created for controller and connect inject webhooks. -```shell-session -$ vault policy write controller-tls-policy - < \ + bound_service_account_namespaces= \ + policies=controller-ca-policy \ + ttl=1h + ``` --> **Note:** The PKI secret paths referenced by the Vault Policies below will be your `global.secretsBackend.vault.controller.caCert.secretName` and `global.secretsBackend.vault.connectInject.caCert.secretName` Helm values respectively. + To find out the service account name of the Consul controller, + you can run: -Next, create a policy that allows `["read"]` access to the [CA URL](https://www.vaultproject.io/api/secret/pki#read-certificate). The policy is required so that Consul components can communicate with the Consul servers in order to fetch their auto-encryption certificates. Issue the following commands to create the policy: + ```shell-session + $ helm template --release-name ${RELEASE_NAME} --show-only templates/controller-serviceaccount.yaml hashicorp/consul + ``` -```shell-session -$ vault policy write controller-ca-policy - < \ + bound_service_account_namespaces= \ + policies=connect-inject-ca-policy \ + ttl=1h + ``` -Issue the following command to create a Vault role for the controller PKI engine and set the default parameters for issuing certificates: + To find out the service account name of the Consul connect inject, use the command below. + ```shell-session + $ helm template --release-name ${RELEASE_NAME} --show-only templates/connect-inject-serviceaccount.yaml hashicorp/consul + ``` -```shell-session -$ vault write controller/roles/controller-role \ - allowed_domains="" \ - allow_subdomains=true \ - allow_bare_domains=true \ - allow_localhost=true \ - generate_lease=true \ - max_ttl="720h" -``` - -Issue the following command to create a Vault role for the connect inject PKI engine and set the default parameters for issuing certificates: - -```shell-session -$ vault write connect-inject/roles/connect-inject-role \ - allowed_domains="" \ - allow_subdomains=true \ - allow_bare_domains=true \ - allow_localhost=true \ - generate_lease=true \ - max_ttl="720h" -``` - -To generate the `` for each use the following script as a template: - -```shell-session -#!/bin/sh - -# NAME is set to either the value from `global.name` from your Consul K8s value file, or your $HELM_RELEASE_NAME-consul -export NAME=consulk8s -# NAMESPACE is where the Consul on Kubernetes is installed -export NAMESPACE=consul -# DATACENTER is the value of `global.datacenter` from your Helm values config file -export DATACENTER=dc1 - -echo allowed_domains_controller=\"${NAME}-controller-webhook,${NAME}-controller-webhook.${NAMESPACE},${NAME}-controller-webhook.${NAMESPACE}.svc,${NAME}-controller-webhook.${NAMESPACE}.svc.cluster.local\"" - -echo allowed_domains_connect_inject=\"${NAME}-connect-injector,${NAME}-connect-injector.${NAMESPACE},${NAME}-connect-injector.${NAMESPACE}.svc,${NAME}-connect-injector.${NAMESPACE}.svc.cluster.local\"" -``` - -### Create a Vault auth roles that link the policy to each Consul on Kubernetes service account that requires access - --> **Note:** The Vault auth roles below will be your `global.secretsBackend.vault.controllerRole` and `global.secretsBackend.vault.connectInjectRole` Helm values respectively. - - -Finally, Kubernetes auth roles need to be created for controller and connect inject webhooks. 
- -Role for Consul controller webhooks: -```shell-session -$ vault write auth/kubernetes/role/controller-role \ - bound_service_account_names= \ - bound_service_account_namespaces= \ - policies=controller-ca-policy \ - ttl=1h -``` - -To find out the service account name of the Consul controller, -you can run: - -```shell-session - $ helm template --release-name ${RELEASE_NAME} --show-only templates/controller-serviceaccount.yaml hashicorp/consul -``` - -Role for Consul connect inject webhooks: - -```shell-session -$ vault write auth/kubernetes/role/connect-inject-role \ - bound_service_account_names= \ - bound_service_account_namespaces= \ - policies=connect-inject-ca-policy \ - ttl=1h -``` - -To find out the service account name of the Consul connect inject, use the command below. -```shell-session - $ helm template --release-name ${RELEASE_NAME} --show-only templates/connect-inject-serviceaccount.yaml hashicorp/consul -``` - -### Update the Consul on Kubernetes helm chart +## Update Consul on Kubernetes Helm chart Now that we've configured Vault, you can configure the Consul Helm chart to use the Server TLS certificates from Vault: diff --git a/website/content/docs/k8s/installation/vault/systems-integration.mdx b/website/content/docs/k8s/installation/vault/systems-integration.mdx index 02e01b971..a48bcdb45 100644 --- a/website/content/docs/k8s/installation/vault/systems-integration.mdx +++ b/website/content/docs/k8s/installation/vault/systems-integration.mdx @@ -8,24 +8,20 @@ description: >- # Vault as the Secrets Backend - Systems Integration ## Overview -At a high level, configuring a systems integration of Vault with Consul on Kubernetes consists of 1) a one time setup on Vault and 2) a setup of the secrets backend per Consul datacenter via Helm. +Integrating Vault with Consul on Kubernetes includes a one-time setup on Vault and setting up the secrets backend for each Consul datacenter via Helm. -### One time setup on Vault +Complete the following steps once: - Enabling Vault KV Secrets Engine - Version 2 to store arbitrary secrets - Enabling Vault PKI Engine if you are choosing to store and manage either [Consul Server TLS credentials](/docs/k8s/installation/vault/data-integration/server-tls) or [Service Mesh and Consul client TLS credentials](/docs/k8s/installation/vault/data-integration/connect-ca) -### Setup per Consul datacenter +Repeat the following steps for each datacenter in the cluster: - Installing the Vault Injector within the Consul datacenter installation - Configuring a Kubernetes Auth Method in Vault to authenticate and authorize operations from the Consul datacenter - Enable Vault as the Secrets Backend in the Consul datacenter -## One time setup on Vault - -A one time setup on a Vault deployment is necessary to enable both the Vault KV Secrets Engine and the Vault PKI Engine. These docs assume that you have already setup a Vault cluster for use with Consul on Kubernetes. - Please read [Run Vault on Kubernetes](https://www.vaultproject.io/docs/platform/k8s/helm/run) if instructions on setting up a Vault cluster are needed. 
-### Vault KV Secrets Engine - Version 2 +## Vault KV Secrets Engine - Version 2 The following secrets can be stored in Vault KV secrets engine, which is meant to handle arbitrary secrets: - ACL Bootstrap token ([`global.acls.bootstrapToken`](/docs/k8s/helm#v-global-acls-bootstraptoken)) @@ -41,7 +37,7 @@ In order to store any of these secrets, we must enable the [Vault KV secrets eng $ vault secrets enable -path=consul kv-v2 ``` -### Vault PKI Engine +## Vault PKI Engine The Vault PKI Engine must be enabled in order to leverage Vault for issuing Consul Server TLS certificates. More details for configuring the PKI Engine is found in [Bootstrapping the PKI Engine](/docs/k8s/installation/vault/data-integration/server-tls#bootstrapping-the-pki-engine) under the Server TLS section. @@ -49,61 +45,83 @@ The Vault PKI Engine must be enabled in order to leverage Vault for issuing Cons $ vault secrets enable pki ``` -## Setup per Consul datacenter - -After configuring Vault, Consul datacenters on Kubernetes must be deployed with the Vault Agent injector and configured to leverage the Vault Kubernetes Auth Method to read secrets from a Vault cluster. - -### Set Environment Variables to ensure integration consistency +## Set Environment Variables Before installing the Vault Injector and configuring the Vault Kubernetes Auth Method, some environment variables need to be set to better ensure consistent mapping between Vault and Consul on Kubernetes. -#### DATACENTER - - - **Recommended value:** value of `global.datacenter` in your Consul Helm values file. - ```shell-session - $ export DATACENTER=dc1 - ``` -#### VAULT_AUTH_METHOD_NAME - - - **Recommended value:** a concatenation of a `kubernetes-` prefix (to denote the auth method type) with `DATACENTER` environment variable. - ```shell-session - $ export VAULT_AUTH_METHOD_NAME=kubernetes-${DATACENTER} - ``` + - DATACENTER -#### VAULT_SERVER_HOST + We recommend using the value for `global.datacenter` in your Consul Helm values file for this variable. + ```shell-session + $ export DATACENTER=dc1 + ``` - - **Recommended value:** find the external IP address of your Vault cluster. - - If Vault is installed in a Kubernetes cluster, get the external IP or DNS name of the Vault server load balancer. - - On GKE or AKS, it'll be an IP: - ```shell-session - $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - ``` - - On EKS, it'll be a hostname: - ```shell-session - $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') - ``` - - If Vault is not running on Kubernetes, utilize the `api_addr` as defined in the Vault [High Availability Parameters](https://www.vaultproject.io/docs/configuration#high-availability-parameters) configuration: - ```shell-session - $ export VAULT_SERVER_HOST= - ``` + - VAULT_AUTH_METHOD_NAME + + We recommend using a concatenation of a `kubernetes-` prefix (to denote the auth method type) with the `DATACENTER` environment variable for this variable. + ```shell-session + $ export VAULT_AUTH_METHOD_NAME=kubernetes-${DATACENTER} + ``` -#### VAULT_ADDR + - VAULT_SERVER_HOST + + We recommend using the external IP address of your Vault cluster for this variable. + + If Vault is installed in a Kubernetes cluster, get the external IP or DNS name of the Vault server load balancer. 
+ + + + On EKS, you can get the hostname of the Vault server's load balancer with the following command: - - **Recommended value:** Connecting to port 8200 of the Vault server + ```shell-session + $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + ``` + + + + + + On GKE, you can get the IP address of the Vault server's load balancer with the following command: + + ```shell-session + $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + ``` + + + + + + On AKS, you can get the IP address of the Vault server's load balancer with the following command: + + ```shell-session + $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 --output jsonpath='{.status.loadBalancer.ingress[0].ip}') + ``` + + + + + If Vault is not running on Kubernetes, utilize the `api_addr` as defined in the Vault [High Availability Parameters](https://www.vaultproject.io/docs/configuration#high-availability-parameters) configuration: + ```shell-session + $ export VAULT_SERVER_HOST= + ``` + + - VAULT_AUTH_METHOD_NAME + + We recommend connecting to port 8200 of the Vault server. ```shell-session $ export VAULT_ADDR=http://${VAULT_SERVER_HOST}:8200 ``` - -> **Note:** If your vault installation is current exposed using SSL, this address will need to use `https` instead of `http`. You will also need to setup the [`VAULT_CACERT`](https://www.vaultproject.io/docs/commands#vault_cacert) environment variable. + If your vault installation is current exposed using SSL, this address will need to use `https` instead of `http`. You will also need to setup the [`VAULT_CACERT`](https://www.vaultproject.io/docs/commands#vault_cacert) environment variable. -#### VAULT_TOKEN - - - **Recommended value:** Your allocated Vault token. If running Vault in dev mode, this can be set to to `root`. + - VAULT_TOKEN + + We recommend using your allocated Vault token as the value for this variable. If running Vault in dev mode, this can be set to to `root`. ```shell-session - $ export VAULT_ADDR= + $ export VAULT_TOKEN= ``` -### Install Vault Injector in your Consul k8s cluster +## Install Vault Injector in Consul k8s cluster A minimal valid installation of Vault Kubernetes must include the Agent Injector which is utilized for accessing secrets from Vault. Vault servers could be deployed external to Vault on Kubernetes with the [`injector.externalVaultAddr`](https://www.vaultproject.io/docs/platform/k8s/helm/configuration#externalvaultaddr) value in the Vault Helm Configuration. @@ -125,9 +143,7 @@ Issue the Helm `install` command to install the Vault agent injector using the H $ helm install vault-${DATACENTER} -f vault-injector.yaml hashicorp/vault --wait ``` -### Configure the Kubernetes Auth Method in Vault for the datacenter - -#### Enable the Auth Method +## Configure the Kubernetes Auth Method in Vault Ensure that the Vault Kubernetes Auth method is enabled. @@ -135,8 +151,6 @@ Ensure that the Vault Kubernetes Auth method is enabled. $ vault auth enable -path=kubernetes-${DATACENTER} kubernetes ``` -#### Configure Auth Method with JWT token of service account - After enabling the Kubernetes auth method, in Vault, ensure that you have configured the Kubernetes Auth method properly as described in [Kubernetes Auth Method Configuration](https://www.vaultproject.io/docs/auth/kubernetes#configuration). First, while targeting your Consul cluster, get the externally reachable address of the Consul Kubernetes cluster. 
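+
+-> **Note:** As an illustrative aside, the enable and `config` writes in this section can also be
+ scripted with Vault's Go API client. The sketch below is not part of the documented workflow; it
+ assumes the auth method is mounted at `kubernetes-dc1`, and the host, reviewer JWT, and CA
+ certificate values are placeholders you must supply.
+
+```go
+package main
+
+import (
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// VAULT_ADDR and VAULT_TOKEN are read from the environment.
+	client, err := vault.NewClient(vault.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Mirrors `vault auth enable -path=kubernetes-dc1 kubernetes`.
+	if err := client.Sys().EnableAuthWithOptions("kubernetes-dc1", &vault.EnableAuthOptions{
+		Type: "kubernetes",
+	}); err != nil {
+		log.Fatal(err)
+	}
+	// Mirrors the `vault write auth/kubernetes-dc1/config ...` step; all three values are placeholders.
+	if _, err := client.Logical().Write("auth/kubernetes-dc1/config", map[string]interface{}{
+		"kubernetes_host":    "https://<KUBE_API_URL>:443",
+		"token_reviewer_jwt": "<SERVICE_ACCOUNT_JWT>",
+		"kubernetes_ca_cert": "<KUBE_CA_CERT_PEM>",
+	}); err != nil {
+		log.Fatal(err)
+	}
+}
+```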
@@ -157,7 +171,7 @@ $ vault write auth/kubernetes/config \ kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt ``` -#### Enable Vault as the Secrets Backend in the Consul datacenter +## Update Vault Helm chart Finally, you will configure the Consul on Kubernetes helm chart for the datacenter to expect to receive the following values (if you have configured them) to be retrieved from Vault: - ACL Bootstrap token ([`global.acls.bootstrapToken`](/docs/k8s/helm#v-global-acls-bootstraptoken)) - ACL Partition token ([`global.acls.partitionToken`](/docs/k8s/helm#v-global-acls-partitiontoken)) diff --git a/website/content/docs/k8s/installation/vault/wan-federation.mdx b/website/content/docs/k8s/installation/vault/wan-federation.mdx index b454dbec7..37a13d64e 100644 --- a/website/content/docs/k8s/installation/vault/wan-federation.mdx +++ b/website/content/docs/k8s/installation/vault/wan-federation.mdx @@ -78,7 +78,7 @@ In this setup, you will deploy Vault server in the primary datacenter (dc1) Kube - On EKS, you can get the IP address of the Vault server's load balancer with the following command: + On EKS, you can get the hostname of the Vault server's load balancer with the following command: ```shell-session $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') @@ -114,17 +114,18 @@ In this setup, you will deploy Vault server in the primary datacenter (dc1) Kube ``` ## Systems Integration -### Overview -To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must complete following systems integration actions: +There are two main procedures to enable Vault as the service mesh certificate provider in Kubernetes. -- One time setup in Vault +Complete the following steps once: 1. Enabling Vault KV Secrets Engine - Version 2. 1. Enabling Vault PKI Engine. -- Setup per Consul datacenter + +Repeat the following steps for each datacenter in the cluster: 1. Installing the Vault Injector within the Consul datacenter installation 1. Configuring a Kubernetes Auth Method in Vault to authenticate and authorize operations from the Consul datacenter 1. Enable Vault as the Secrets Backend in the Consul datacenter -### One time setup on Vault + +### Configure Vault Secrets engines 1. Enable [Vault KV secrets engine - Version 2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) in order to store the [Gossip Encryption Key](/docs/k8s/helm#v-global-acls-replicationtoken) and the ACL Replication token ([`global.acls.replicationToken`](/docs/k8s/helm#v-global-acls-replicationtoken)). ```shell-session @@ -141,8 +142,7 @@ To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must co $ vault secrets tune -max-lease-ttl=87600h pki ``` -### Setup per Consul datacenter -#### Primary Datacenter (dc1) +### Primary Datacenter (dc1) 1. Install the Vault Injector in your Consul Kubernetes cluster (dc1), which is used for accessing secrets from Vault. -> **Note**: In the primary datacenter (dc1), you will not have to configure `injector.externalvaultaddr` value because the Vault server is in the same primary datacenter (dc1) cluster. @@ -198,7 +198,7 @@ To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must co -#### Secondary Datacenter (dc2) +### Secondary Datacenter (dc2) 1. Install the Vault Injector in the secondary datacenter (dc2). 
In the secondary datacenter (dc2), you will configure the `externalvaultaddr` value point to the external address of the Vault server in the primary datacenter (dc1). @@ -300,18 +300,17 @@ To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must co ## Data Integration -### Overview -To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must complete following data integration actions: +There are two main procedures for using Vault as the service mesh certificate provider in Kubernetes. - -- One time setup in Vault +Complete the following steps once: 1. Store the secrets in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secrets. -- Setup per Consul datacenter + +Repeat the following steps for each datacenter in the cluster: 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Update the Consul on Kubernetes helm chart. -### One time setup in Vault +### Secrets and Policies 1. Store the ACL Replication Token, Gossip Encryption Key, and Root CA certificate secrets in Vault. ```shell-session @@ -351,13 +350,12 @@ To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must co EOF ``` -### Setup per Consul datacenter -#### Pre-installation for Primary Datacenter (dc1) +### Pre-installation for Primary Datacenter (dc1) 1. Change your Kubernetes context to target the primary datacenter (dc1): ```shell-session $ kubectl config use-context ``` -#### Primary Datacenter (dc1) +### Primary Datacenter (dc1) 1. Create Server TLS and Service Mesh Cert Policies ```shell-session @@ -491,7 +489,7 @@ To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must co $ helm install consul-dc1 --values consul-dc1.yaml hashicorp/consul ``` -#### Pre-installation for Secondary Datacenter (dc2) +### Pre-installation for Secondary Datacenter (dc2) 1. Update the Consul on Kubernetes helm chart. For secondary datacenter (dc2), you will need to get the address of the mesh gateway from the **primary datacenter (dc1)** cluster. Keep your Kubernetes context targeting dc1 and set the `MESH_GW_HOST` environment variable that you will use in the Consul Helm chart for secondary datacenter (dc2). @@ -532,7 +530,7 @@ To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must co ```shell-session $ kubectl config use-context ``` -#### Secondary Datacenter (dc2) +### Secondary Datacenter (dc2) 1. 
Create Server TLS and Service Mesh Cert Policies From 5eaab0efcbbfe263a1efabd93e66eb5b9048f6de Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Fri, 15 Jul 2022 09:56:33 -0700 Subject: [PATCH 017/107] peering: add warning about AllowStaleRead (#13768) --- .../tools/proto-gen-rpc-glue/e2e/source.pb.go | 5 ++ .../e2e/source.rpcglue.pb.go.golden | 44 +++++++++++++++ internal/tools/proto-gen-rpc-glue/main.go | 55 +++++++++++++++++++ proto/pbpeering/peering.pb.go | 4 +- proto/pbpeering/peering.proto | 4 +- proto/pbpeering/peering.rpcglue.pb.go | 6 ++ 6 files changed, 114 insertions(+), 4 deletions(-) diff --git a/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go b/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go index 4d24ab328..f90deb206 100644 --- a/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go +++ b/internal/tools/proto-gen-rpc-glue/e2e/source.pb.go @@ -43,6 +43,11 @@ type ExampleReadTODO struct { Value string } +// @consul-rpc-glue: LeaderReadTODO +type ExampleLeaderReadTODO struct { + Value string +} + // @consul-rpc-glue: WriteTODO type ExampleWriteTODO struct { Value string diff --git a/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden b/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden index c4c8e1e7f..4c7b1c361 100644 --- a/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden +++ b/internal/tools/proto-gen-rpc-glue/e2e/source.rpcglue.pb.go.golden @@ -308,6 +308,50 @@ func (msg *ExampleReadTODO) Token() string { return "" } +// IsRead implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) IsRead() bool { + // TODO(peering): figure out read semantics here + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) AllowStaleRead() bool { + // TODO(peering): figure out read semantics here + // TODO(peering): this needs to stay false for calls to head to the leader until we sync stream tracker information + // like ImportedServicesCount, ExportedServicesCount, as well as general Status fields thru raft to make available + // to followers as well + return false +} + +// HasTimedOut implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + // TODO(peering): figure out read semantics here + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { + // TODO(peering): figure out read semantics here + return rpcHoldTimeout +} + +// SetTokenSecret implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) SetTokenSecret(s string) { + // TODO(peering): figure out read semantics here +} + +// TokenSecret implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) TokenSecret() string { + // TODO(peering): figure out read semantics here + return "" +} + +// Token implements structs.RPCInfo +func (msg *ExampleLeaderReadTODO) Token() string { + // TODO(peering): figure out read semantics here + return "" +} + // IsRead implements structs.RPCInfo func (msg *ExampleWriteTODO) IsRead() bool { // TODO(peering): figure out write semantics here diff --git a/internal/tools/proto-gen-rpc-glue/main.go b/internal/tools/proto-gen-rpc-glue/main.go index cbb334039..ec1f2d93c 100644 --- a/internal/tools/proto-gen-rpc-glue/main.go +++ b/internal/tools/proto-gen-rpc-glue/main.go @@ -108,6 +108,9 @@ func 
processFile(path string) error { if ann.ReadTODO != "" { log.Printf(" ReadTODO from %s", ann.ReadTODO) } + if ann.LeaderReadTODO != "" { + log.Printf(" LeaderReadTODO from %s", ann.ReadTODO) + } if ann.WriteTODO != "" { log.Printf(" WriteTODO from %s", ann.WriteTODO) } @@ -157,6 +160,9 @@ var _ time.Month if typ.Annotation.Datacenter != "" { buf.WriteString(fmt.Sprintf(tmplDatacenter, typ.Name, typ.Annotation.Datacenter)) } + if typ.Annotation.LeaderReadTODO != "" { + buf.WriteString(fmt.Sprintf(tmplLeaderOnlyReadTODO, typ.Name, typ.Annotation.ReadTODO)) + } if typ.Annotation.ReadTODO != "" { buf.WriteString(fmt.Sprintf(tmplReadTODO, typ.Name, typ.Annotation.ReadTODO)) } @@ -266,6 +272,7 @@ type Annotation struct { TargetDatacenter string Datacenter string ReadTODO string + LeaderReadTODO string WriteTODO string } @@ -319,6 +326,8 @@ func getAnnotation(doc []*ast.Comment) (Annotation, error) { ann.ReadTODO = "ReadTODO" case part == "WriteTODO": ann.WriteTODO = "WriteTODO" + case part == "LeaderReadTODO": + ann.LeaderReadTODO = "LeaderReadTODO" default: return Annotation{}, fmt.Errorf("unexpected annotation part: %s", part) @@ -463,6 +472,52 @@ func (msg *%[1]s) Token() string { } ` +const tmplLeaderOnlyReadTODO = ` +// IsRead implements structs.RPCInfo +func (msg *%[1]s) IsRead() bool { + // TODO(peering): figure out read semantics here + return true +} + +// AllowStaleRead implements structs.RPCInfo +func (msg *%[1]s) AllowStaleRead() bool { + // TODO(peering): figure out read semantics here + // TODO(peering): this needs to stay false for calls to head to the leader until we sync stream tracker information + // like ImportedServicesCount, ExportedServicesCount, as well as general Status fields thru raft to make available + // to followers as well + return false +} + +// HasTimedOut implements structs.RPCInfo +func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { + // TODO(peering): figure out read semantics here + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *%[1]s) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { + // TODO(peering): figure out read semantics here + return rpcHoldTimeout +} + +// SetTokenSecret implements structs.RPCInfo +func (msg *%[1]s) SetTokenSecret(s string) { + // TODO(peering): figure out read semantics here +} + +// TokenSecret implements structs.RPCInfo +func (msg *%[1]s) TokenSecret() string { + // TODO(peering): figure out read semantics here + return "" +} + +// Token implements structs.RPCInfo +func (msg *%[1]s) Token() string { + // TODO(peering): figure out read semantics here + return "" +} +` + const tmplReadTODO = ` // IsRead implements structs.RPCInfo func (msg *%[1]s) IsRead() bool { diff --git a/proto/pbpeering/peering.pb.go b/proto/pbpeering/peering.pb.go index 336808b72..8fc14b83e 100644 --- a/proto/pbpeering/peering.pb.go +++ b/proto/pbpeering/peering.pb.go @@ -379,7 +379,7 @@ func (x *PeeringTrustBundle) GetModifyIndex() uint64 { return 0 } -// @consul-rpc-glue: Datacenter,ReadTODO +// @consul-rpc-glue: Datacenter,LeaderReadTODO type PeeringReadRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -490,7 +490,7 @@ func (x *PeeringReadResponse) GetPeering() *Peering { return nil } -// @consul-rpc-glue: Datacenter,ReadTODO +// @consul-rpc-glue: Datacenter,LeaderReadTODO type PeeringListRequest struct { state protoimpl.MessageState sizeCache 
protoimpl.SizeCache diff --git a/proto/pbpeering/peering.proto b/proto/pbpeering/peering.proto index 62679bbca..44df1df15 100644 --- a/proto/pbpeering/peering.proto +++ b/proto/pbpeering/peering.proto @@ -136,7 +136,7 @@ message PeeringTrustBundle { uint64 ModifyIndex = 7; } -// @consul-rpc-glue: Datacenter,ReadTODO +// @consul-rpc-glue: Datacenter,LeaderReadTODO message PeeringReadRequest { string Name = 1; string Partition = 2; @@ -152,7 +152,7 @@ message PeeringReadResponse { //TODO(peering) query metadata } -// @consul-rpc-glue: Datacenter,ReadTODO +// @consul-rpc-glue: Datacenter,LeaderReadTODO message PeeringListRequest { string Partition = 1; diff --git a/proto/pbpeering/peering.rpcglue.pb.go b/proto/pbpeering/peering.rpcglue.pb.go index 33700e591..4c3c4dff0 100644 --- a/proto/pbpeering/peering.rpcglue.pb.go +++ b/proto/pbpeering/peering.rpcglue.pb.go @@ -29,6 +29,9 @@ func (msg *PeeringReadRequest) IsRead() bool { // AllowStaleRead implements structs.RPCInfo func (msg *PeeringReadRequest) AllowStaleRead() bool { // TODO(peering): figure out read semantics here + // TODO(peering): this needs to stay false for calls to head to the leader until we sync stream tracker information + // like ImportedServicesCount, ExportedServicesCount, as well as general Status fields thru raft to make available + // to followers as well return false } @@ -78,6 +81,9 @@ func (msg *PeeringListRequest) IsRead() bool { // AllowStaleRead implements structs.RPCInfo func (msg *PeeringListRequest) AllowStaleRead() bool { // TODO(peering): figure out read semantics here + // TODO(peering): this needs to stay false for calls to head to the leader until we sync stream tracker information + // like ImportedServicesCount, ExportedServicesCount, as well as general Status fields thru raft to make available + // to followers as well return false } From d9643ca4995285b72c46cf1f9b0c8d9b891bb4c5 Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Fri, 15 Jul 2022 09:58:21 -0700 Subject: [PATCH 018/107] Latest submodule versions (#13750) --- api/go.mod | 2 +- go.mod | 4 ++-- test/integration/consul-container/go.mod | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/go.mod b/api/go.mod index 33d9c558c..5511d7d7e 100644 --- a/api/go.mod +++ b/api/go.mod @@ -6,7 +6,7 @@ replace github.com/hashicorp/consul/sdk => ../sdk require ( github.com/google/go-cmp v0.5.7 - github.com/hashicorp/consul/sdk v0.8.0 + github.com/hashicorp/consul/sdk v0.10.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-rootcerts v1.0.2 diff --git a/go.mod b/go.mod index 0f71dff66..84e9d1d18 100644 --- a/go.mod +++ b/go.mod @@ -26,8 +26,8 @@ require ( github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 - github.com/hashicorp/consul/api v1.11.0 - github.com/hashicorp/consul/sdk v0.8.0 + github.com/hashicorp/consul/api v1.13.1 + github.com/hashicorp/consul/sdk v0.10.0 github.com/hashicorp/go-bexpr v0.1.2 github.com/hashicorp/go-checkpoint v0.5.0 github.com/hashicorp/go-cleanhttp v0.5.1 diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index 182ff2e3a..6290d8141 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -5,7 +5,7 @@ go 1.18 require ( github.com/docker/docker v20.10.11+incompatible github.com/hashicorp/consul/api v1.11.0 - 
github.com/hashicorp/consul/sdk v0.8.0 + github.com/hashicorp/consul/sdk v0.10.0 github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/hcl v1.0.0 github.com/stretchr/testify v1.7.0 From 70ad4804b6be74ebcdc6eb5e4cb1ca640601a5a7 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Fri, 15 Jul 2022 10:20:43 -0700 Subject: [PATCH 019/107] peering: track imported services (#13718) --- agent/consul/leader_peering_test.go | 277 +++++++++++++++++- .../services/peerstream/replication.go | 33 ++- .../services/peerstream/stream_resources.go | 2 +- .../services/peerstream/stream_test.go | 77 ++++- .../services/peerstream/stream_tracker.go | 33 ++- agent/rpc/peering/service.go | 43 ++- 6 files changed, 422 insertions(+), 43 deletions(-) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 222b59279..1587fc30c 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/consul/types" ) func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { @@ -309,11 +310,6 @@ func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastId Node: "aaa", PeerName: peer, }, - { - CheckID: structs.SerfCheckID, - Node: "aaa", - PeerName: peer, - }, }, })) @@ -336,11 +332,6 @@ func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastId Node: "bbb", PeerName: peer, }, - { - CheckID: structs.SerfCheckID, - Node: "bbb", - PeerName: peer, - }, }, })) @@ -363,13 +354,269 @@ func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastId Node: "ccc", PeerName: peer, }, - { - CheckID: structs.SerfCheckID, - Node: "ccc", - PeerName: peer, - }, }, })) return lastIdx } + +// TODO(peering): once we move away from leader only request for PeeringList, move this test to consul/server_test maybe +func TestLeader_Peering_ImportedServicesCount(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // TODO(peering): Configure with TLS + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + var ( + s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d" + lastIdx = uint64(0) + ) + + // Bring up s2 and store s1's token so that it attempts to dial. 
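+	// Note: the imported-services count is tracked on the cluster that receives the
+	// replicated service data (s2 here, which dials s1 and imports its exported services),
+	// so the ImportedServiceCount assertions at the end of this test query s2's PeeringList
+	// rather than s1's.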
+ _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s2.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1. + p := &pbpeering.Peering{ + ID: s2PeerID, + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + + lastIdx++ + require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, p)) + + /// add services to S1 to be synced to S2 + lastIdx++ + require.NoError(t, s1.FSM().State().EnsureRegistration(lastIdx, &structs.RegisterRequest{ + ID: types.NodeID(generateUUID()), + Node: "aaa", + Address: "10.0.0.1", + Service: &structs.NodeService{ + Service: "a-service", + ID: "a-service-1", + Port: 8080, + }, + Checks: structs.HealthChecks{ + { + CheckID: "a-service-1-check", + ServiceName: "a-service", + ServiceID: "a-service-1", + Node: "aaa", + }, + }, + })) + + lastIdx++ + require.NoError(t, s1.FSM().State().EnsureRegistration(lastIdx, &structs.RegisterRequest{ + ID: types.NodeID(generateUUID()), + + Node: "bbb", + Address: "10.0.0.2", + Service: &structs.NodeService{ + Service: "b-service", + ID: "b-service-1", + Port: 8080, + }, + Checks: structs.HealthChecks{ + { + CheckID: "b-service-1-check", + ServiceName: "b-service", + ServiceID: "b-service-1", + Node: "bbb", + }, + }, + })) + + lastIdx++ + require.NoError(t, s1.FSM().State().EnsureRegistration(lastIdx, &structs.RegisterRequest{ + ID: types.NodeID(generateUUID()), + + Node: "ccc", + Address: "10.0.0.3", + Service: &structs.NodeService{ + Service: "c-service", + ID: "c-service-1", + Port: 8080, + }, + Checks: structs.HealthChecks{ + { + CheckID: "c-service-1-check", + ServiceName: "c-service", + ServiceID: "c-service-1", + Node: "ccc", + }, + }, + })) + /// finished adding services + + type testCase struct { + name string + description string + exportedService structs.ExportedServicesConfigEntry + expectedImportedServicesCount uint64 + } + + testCases := []testCase{ + { + name: "wildcard", + description: "for a wildcard exported services, we want to see all services synced", + exportedService: structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: structs.WildcardSpecifier, + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peer-s2", + }, + }, + }, + }, + }, + expectedImportedServicesCount: 4, // 3 services from above + the "consul" service + }, + { + name: "no sync", + description: "update the config entry to allow no service sync", + exportedService: structs.ExportedServicesConfigEntry{ + Name: "default", + }, + expectedImportedServicesCount: 0, // we want to see this decremented from 4 --> 0 + }, + { + name: "just a, b services", + description: "export just two services", + exportedService: structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "a-service", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peer-s2", + }, + }, + }, + { + Name: "b-service", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peer-s2", + }, + }, + }, + }, + }, + expectedImportedServicesCount: 2, + }, + { + name: "unexport b service", + description: "by unexporting b we want to see the count decrement eventually", + exportedService: 
structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "a-service", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peer-s2", + }, + }, + }, + }, + }, + expectedImportedServicesCount: 1, + }, + { + name: "export c service", + description: "now export the c service and expect the count to increment", + exportedService: structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "a-service", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peer-s2", + }, + }, + }, + { + Name: "c-service", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peer-s2", + }, + }, + }, + }, + }, + expectedImportedServicesCount: 2, + }, + } + + conn2, err := grpc.DialContext(ctx, s2.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn2.Close() + + peeringClient2 := pbpeering.NewPeeringServiceClient(conn2) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lastIdx++ + require.NoError(t, s1.fsm.State().EnsureConfigEntry(lastIdx, &tc.exportedService)) + + retry.Run(t, func(r *retry.R) { + resp2, err := peeringClient2.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + require.NoError(r, err) + require.NotEmpty(r, resp2.Peerings) + require.Equal(r, tc.expectedImportedServicesCount, resp2.Peerings[0].ImportedServiceCount) + }) + }) + } +} diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go index 7ec58b5f4..e21b48a63 100644 --- a/agent/grpc-external/services/peerstream/replication.go +++ b/agent/grpc-external/services/peerstream/replication.go @@ -113,7 +113,9 @@ func marshalToProtoAny[T proto.Message](in any) (*anypb.Any, T, error) { func (s *Server) processResponse( peerName string, partition string, + mutableStatus *MutableStatus, resp *pbpeerstream.ReplicationMessage_Response, + logger hclog.Logger, ) (*pbpeerstream.ReplicationMessage, error) { if !pbpeerstream.KnownTypeURL(resp.ResourceURL) { err := fmt.Errorf("received response for unknown resource type %q", resp.ResourceURL) @@ -137,7 +139,7 @@ func (s *Server) processResponse( ), err } - if err := s.handleUpsert(peerName, partition, resp.ResourceURL, resp.ResourceID, resp.Resource); err != nil { + if err := s.handleUpsert(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, resp.Resource, logger); err != nil { return makeNACKReply( resp.ResourceURL, resp.Nonce, @@ -149,7 +151,7 @@ func (s *Server) processResponse( return makeACKReply(resp.ResourceURL, resp.Nonce), nil case pbpeerstream.Operation_OPERATION_DELETE: - if err := s.handleDelete(peerName, partition, resp.ResourceURL, resp.ResourceID); err != nil { + if err := s.handleDelete(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, logger); err != nil { return makeNACKReply( resp.ResourceURL, resp.Nonce, @@ -178,9 +180,11 @@ func (s *Server) processResponse( func (s *Server) handleUpsert( peerName string, partition string, + mutableStatus *MutableStatus, resourceURL string, resourceID string, resource *anypb.Any, + logger hclog.Logger, ) error { switch resourceURL { case pbpeerstream.TypeURLService: @@ -192,7 +196,16 @@ func (s *Server) handleUpsert( return fmt.Errorf("failed to unmarshal resource: %w", err) } - return s.handleUpdateService(peerName, partition, sn, csn) + err := s.handleUpdateService(peerName, partition, sn, 
csn) + if err != nil { + logger.Error("did not increment imported services count", "service_name", sn.String(), "error", err) + return err + } + + logger.Trace("incrementing imported services count", "service_name", sn.String()) + mutableStatus.TrackImportedService(sn) + + return nil case pbpeerstream.TypeURLRoots: roots := &pbpeering.PeeringTrustBundle{} @@ -425,14 +438,26 @@ func (s *Server) handleUpsertRoots( func (s *Server) handleDelete( peerName string, partition string, + mutableStatus *MutableStatus, resourceURL string, resourceID string, + logger hclog.Logger, ) error { switch resourceURL { case pbpeerstream.TypeURLService: sn := structs.ServiceNameFromString(resourceID) sn.OverridePartition(partition) - return s.handleUpdateService(peerName, partition, sn, nil) + + err := s.handleUpdateService(peerName, partition, sn, nil) + if err != nil { + logger.Error("did not decrement imported services count", "service_name", sn.String(), "error", err) + return err + } + + logger.Trace("decrementing imported services count", "service_name", sn.String()) + mutableStatus.RemoveImportedService(sn) + + return nil default: return fmt.Errorf("unexpected resourceURL: %s", resourceURL) diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 611340082..eabd01141 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -302,7 +302,7 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { if resp := msg.GetResponse(); resp != nil { // TODO(peering): Ensure there's a nonce - reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, resp) + reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp, logger) if err != nil { logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID) status.TrackReceiveError(err.Error()) diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index de1455a63..612513158 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -475,6 +475,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { LastNack: lastNack, LastNackMessage: lastNackMsg, LastReceiveSuccess: lastRecvSuccess, + ImportedServices: map[string]struct{}{"api": {}}, } retry.Run(t, func(r *retry.R) { @@ -532,6 +533,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { LastReceiveSuccess: lastRecvSuccess, LastReceiveError: lastRecvError, LastReceiveErrorMessage: lastRecvErrorMsg, + ImportedServices: map[string]struct{}{"api": {}}, } retry.Run(t, func(r *retry.R) { @@ -559,6 +561,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { LastReceiveSuccess: lastRecvSuccess, LastReceiveErrorMessage: io.EOF.Error(), LastReceiveError: lastRecvError, + ImportedServices: map[string]struct{}{"api": {}}, } retry.Run(t, func(r *retry.R) { @@ -968,6 +971,9 @@ func (b *testStreamBackend) CatalogDeregister(req *structs.DeregisterRequest) er } func Test_processResponse_Validation(t *testing.T) { + peerName := "billing" + peerID := "1fabcd52-1d46-49b0-b1d8-71559aee47f5" + type testCase struct { name string in *pbpeerstream.ReplicationMessage_Response @@ -975,10 +981,18 @@ func Test_processResponse_Validation(t *testing.T) { wantErr bool } - srv, _ := newTestServer(t, nil) + srv, 
store := newTestServer(t, nil) + require.NoError(t, store.PeeringWrite(31, &pbpeering.Peering{ + ID: peerID, + Name: peerName}, + )) + + // connect the stream + mst, err := srv.Tracker.Connected(peerID) + require.NoError(t, err) run := func(t *testing.T, tc testCase) { - reply, err := srv.processResponse("", "", tc.in) + reply, err := srv.processResponse(peerName, "", mst, tc.in, srv.Logger) if tc.wantErr { require.Error(t, err) } else { @@ -1218,8 +1232,8 @@ func expectReplEvents(t *testing.T, client *MockClient, checkFns ...func(t *test } } -func TestHandleUpdateService(t *testing.T) { - srv, _ := newTestServer(t, func(c *Config) { +func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { + srv, store := newTestServer(t, func(c *Config) { backend := c.Backend.(*testStreamBackend) backend.leader = func() bool { return false @@ -1227,13 +1241,15 @@ func TestHandleUpdateService(t *testing.T) { }) type testCase struct { - name string - seed []*structs.RegisterRequest - input *pbservice.IndexedCheckServiceNodes - expect map[string]structs.CheckServiceNodes + name string + seed []*structs.RegisterRequest + input *pbservice.IndexedCheckServiceNodes + expect map[string]structs.CheckServiceNodes + expectedImportedServicesCount int } peerName := "billing" + peerID := "1fabcd52-1d46-49b0-b1d8-71559aee47f5" remoteMeta := pbcommon.NewEnterpriseMetaFromStructs(*structs.DefaultEnterpriseMetaInPartition("billing-ap")) // "api" service is imported from the billing-ap partition, corresponding to the billing peer. @@ -1241,14 +1257,43 @@ func TestHandleUpdateService(t *testing.T) { defaultMeta := *acl.DefaultEnterpriseMeta() apiSN := structs.NewServiceName("api", &defaultMeta) + // create a peering in the state store + require.NoError(t, store.PeeringWrite(31, &pbpeering.Peering{ + ID: peerID, + Name: peerName}, + )) + + // connect the stream + mst, err := srv.Tracker.Connected(peerID) + require.NoError(t, err) + run := func(t *testing.T, tc testCase) { // Seed the local catalog with some data to reconcile against. + // and increment the tracker's imported services count for _, reg := range tc.seed { require.NoError(t, srv.Backend.CatalogRegister(reg)) + + mst.TrackImportedService(reg.Service.CompoundServiceName()) + } + + var op pbpeerstream.Operation + if len(tc.input.Nodes) == 0 { + op = pbpeerstream.Operation_OPERATION_DELETE + } else { + op = pbpeerstream.Operation_OPERATION_UPSERT + } + + in := &pbpeerstream.ReplicationMessage_Response{ + ResourceURL: pbpeerstream.TypeURLService, + ResourceID: apiSN.String(), + Nonce: "1", + Operation: op, + Resource: makeAnyPB(t, tc.input), } // Simulate an update arriving for billing/api. 
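+		// Routing the update through processResponse, rather than calling
+		// handleUpdateService directly, exercises the stream tracker's
+		// imported-services accounting alongside the catalog upsert/delete handling.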
- require.NoError(t, srv.handleUpdateService(peerName, acl.DefaultPartitionName, apiSN, tc.input)) + _, err = srv.processResponse(peerName, acl.DefaultPartitionName, mst, in, srv.Logger) + require.NoError(t, err) for svc, expect := range tc.expect { t.Run(svc, func(t *testing.T) { @@ -1257,6 +1302,9 @@ func TestHandleUpdateService(t *testing.T) { requireEqualInstances(t, expect, got) }) } + + // assert the imported services count modifications + require.Equal(t, tc.expectedImportedServicesCount, mst.GetImportedServicesCount()) } tt := []testCase{ @@ -1390,6 +1438,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, + expectedImportedServicesCount: 1, }, { name: "upsert two service instances to different nodes", @@ -1521,6 +1570,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, + expectedImportedServicesCount: 1, }, { name: "receiving a nil input leads to deleting data in the catalog", @@ -1574,10 +1624,11 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, - input: nil, + input: &pbservice.IndexedCheckServiceNodes{}, expect: map[string]structs.CheckServiceNodes{ "api": {}, }, + expectedImportedServicesCount: 0, }, { name: "deleting one service name from a node does not delete other service names", @@ -1632,7 +1683,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, // Nil input is for the "api" service. - input: nil, + input: &pbservice.IndexedCheckServiceNodes{}, expect: map[string]structs.CheckServiceNodes{ "api": {}, // Existing redis service was not affected by deletion. @@ -1668,6 +1719,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, + expectedImportedServicesCount: 1, }, { name: "service checks are cleaned up when not present in a response", @@ -1738,6 +1790,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, + expectedImportedServicesCount: 2, }, { name: "node checks are cleaned up when not present in a response", @@ -1872,6 +1925,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, + expectedImportedServicesCount: 2, }, { name: "replacing a service instance on a node cleans up the old instance", @@ -2019,6 +2073,7 @@ func TestHandleUpdateService(t *testing.T) { }, }, }, + expectedImportedServicesCount: 2, }, } diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index 5ec0f7ebf..4d4c8746c 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -4,9 +4,11 @@ import ( "fmt" "sync" "time" + + "github.com/hashicorp/consul/agent/structs" ) -// Tracker contains a map of (PeerID -> Status). +// Tracker contains a map of (PeerID -> MutableStatus). // As streams are opened and closed we track details about their status. type Tracker struct { mu sync.RWMutex @@ -142,6 +144,10 @@ type Status struct { // - The error message when we failed to store a resource replicated FROM the peer. // - The last error message when receiving from the stream. 
LastReceiveErrorMessage string + + // TODO(peering): consider keeping track of imported service counts thru raft + // ImportedServices is set that keeps track of which service names are imported for the peer + ImportedServices map[string]struct{} } func newMutableStatus(now func() time.Time) *MutableStatus { @@ -222,3 +228,28 @@ func (s *MutableStatus) GetStatus() Status { return copy } + +func (s *MutableStatus) RemoveImportedService(sn structs.ServiceName) { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.ImportedServices, sn.String()) +} + +func (s *MutableStatus) TrackImportedService(sn structs.ServiceName) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.ImportedServices == nil { + s.ImportedServices = make(map[string]struct{}) + } + + s.ImportedServices[sn.String()] = struct{}{} +} + +func (s *MutableStatus) GetImportedServicesCount() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.ImportedServices) +} diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 94b7d73a3..47e39c2b7 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -337,7 +337,17 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ if peering == nil { return &pbpeering.PeeringReadResponse{Peering: nil}, nil } + cp := copyPeeringWithNewState(peering, s.reconciledStreamStateHint(peering.ID, peering.State)) + + // add imported services count + st, found := s.Tracker.StreamStatus(peering.ID) + if !found { + s.Logger.Trace("did not find peer in stream tracker when reading peer", "peerID", peering.ID) + } else { + cp.ImportedServiceCount = uint64(len(st.ImportedServices)) + } + return &pbpeering.PeeringReadResponse{Peering: cp}, nil } @@ -369,6 +379,15 @@ func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequ var cPeerings []*pbpeering.Peering for _, p := range peerings { cp := copyPeeringWithNewState(p, s.reconciledStreamStateHint(p.ID, p.State)) + + // add imported services count + st, found := s.Tracker.StreamStatus(p.ID) + if !found { + s.Logger.Trace("did not find peer in stream tracker when listing peers", "peerID", p.ID) + } else { + cp.ImportedServiceCount = uint64(len(st.ImportedServices)) + } + cPeerings = append(cPeerings, cp) } return &pbpeering.PeeringListResponse{Peerings: cPeerings}, nil @@ -586,17 +605,19 @@ func (s *Server) getExistingOrCreateNewPeerID(peerName, partition string) (strin func copyPeeringWithNewState(p *pbpeering.Peering, state pbpeering.PeeringState) *pbpeering.Peering { return &pbpeering.Peering{ - ID: p.ID, - Name: p.Name, - Partition: p.Partition, - DeletedAt: p.DeletedAt, - Meta: p.Meta, - PeerID: p.PeerID, - PeerCAPems: p.PeerCAPems, - PeerServerAddresses: p.PeerServerAddresses, - PeerServerName: p.PeerServerName, - CreateIndex: p.CreateIndex, - ModifyIndex: p.ModifyIndex, + ID: p.ID, + Name: p.Name, + Partition: p.Partition, + DeletedAt: p.DeletedAt, + Meta: p.Meta, + PeerID: p.PeerID, + PeerCAPems: p.PeerCAPems, + PeerServerAddresses: p.PeerServerAddresses, + PeerServerName: p.PeerServerName, + CreateIndex: p.CreateIndex, + ModifyIndex: p.ModifyIndex, + ImportedServiceCount: p.ImportedServiceCount, + ExportedServiceCount: p.ExportedServiceCount, State: state, } From 61ebb380926cf809dab4090222be53461c40106c Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 15 Jul 2022 13:15:50 -0500 Subject: [PATCH 020/107] server: ensure peer replication can successfully use TLS over external gRPC (#13733) Ensure that the peer stream replication rpc can successfully be used with TLS activated. Also: - If key material is configured for the gRPC port but HTTPS is not enabled now TLS will still be activated for the gRPC port. - peerstream replication stream opened by the establishing-side will now ignore grpc.WithBlock so that TLS errors will bubble up instead of being awkwardly delayed or suppressed --- agent/agent.go | 7 +- agent/consul/leader_peering.go | 19 ++- agent/consul/leader_peering_test.go | 152 +++++++++++++++++- agent/consul/peering_backend.go | 2 +- agent/consul/server_test.go | 4 +- .../services/peerstream/stream_tracker.go | 33 +++- agent/rpc/peering/service_test.go | 2 +- .../envoy/case-wanfed-gw/primary/common.hcl | 16 +- .../envoy/case-wanfed-gw/primary/server.hcl | 16 +- .../envoy/case-wanfed-gw/secondary/common.hcl | 16 +- tlsutil/config.go | 9 +- .../docs/agent/config/config-files.mdx | 2 - .../docs/upgrading/upgrade-specific.mdx | 12 ++ 13 files changed, 244 insertions(+), 46 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 5412436e5..4b78b69a9 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -761,12 +761,7 @@ func (a *Agent) Failed() <-chan struct{} { } func (a *Agent) buildExternalGRPCServer() { - // TLS is only enabled on the gRPC server if there's an HTTPS port configured. - var tls *tlsutil.Configurator - if a.config.HTTPSPort > 0 { - tls = a.tlsConfigurator - } - a.externalGRPCServer = external.NewServer(a.logger.Named("grpc.external"), tls) + a.externalGRPCServer = external.NewServer(a.logger.Named("grpc.external"), a.tlsConfigurator) } func (a *Agent) listenAndServeGRPC() error { diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index 49369bbf7..28a8397df 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -6,6 +6,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" @@ -14,6 +15,7 @@ import ( "golang.org/x/time/rate" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" @@ -225,6 +227,11 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer retryCtx, cancel := context.WithCancel(ctx) cancelFns[peer.ID] = cancel + streamStatus, err := s.peerStreamTracker.Register(peer.ID) + if err != nil { + return fmt.Errorf("failed to register stream: %v", err) + } + // Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes. go retryLoopBackoff(retryCtx, func() error { // Try a new address on each iteration by advancing the ring buffer on errors. @@ -238,8 +245,15 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer logger.Trace("dialing peer", "addr", addr) conn, err := grpc.DialContext(retryCtx, addr, - grpc.WithBlock(), + // TODO(peering): use a grpc.WithStatsHandler here?) tlsOption, + // For keep alive parameters there is a larger comment in ClientConnPool.dial about that. 
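+			// Time is how long the connection may sit idle before a keepalive ping is sent;
+			// Timeout is how long to wait for the ping ack before the connection is
+			// considered dead and torn down.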
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 30 * time.Second, + Timeout: 10 * time.Second, + // send keepalive pings even if there is no active streams + PermitWithoutStream: true, + }), ) if err != nil { return fmt.Errorf("failed to dial: %w", err) @@ -277,8 +291,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer return err }, func(err error) { - // TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs. - // Lockable status isn't available here though. Could report it via the peering.Service? + streamStatus.TrackSendError(err.Error()) logger.Error("error managing peering stream", "peer_id", peer.ID, "error", err) }) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 1587fc30c..33ef26d61 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "encoding/json" + "io/ioutil" "testing" "time" @@ -21,15 +22,27 @@ import ( ) func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { + t.Run("without-tls", func(t *testing.T) { + testLeader_PeeringSync_Lifecycle_ClientDeletion(t, false) + }) + t.Run("with-tls", func(t *testing.T) { + testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true) + }) +} +func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) { if testing.Short() { t.Skip("too slow for testing.Short") } - // TODO(peering): Configure with TLS _, s1 := testServerWithConfig(t, func(c *Config) { - c.NodeName = "s1.dc1" + c.NodeName = "bob" c.Datacenter = "dc1" c.TLSConfig.Domain = "consul" + if enableTLS { + c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt" + c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt" + c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key" + } }) testrpc.WaitForLeader(t, s1.RPC, "dc1") @@ -69,9 +82,14 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { // Bring up s2 and store s1's token so that it attempts to dial. 
_, s2 := testServerWithConfig(t, func(c *Config) { - c.NodeName = "s2.dc2" + c.NodeName = "betty" c.Datacenter = "dc2" c.PrimaryDatacenter = "dc2" + if enableTLS { + c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt" + c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt" + c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key" + } }) testrpc.WaitForLeader(t, s2.RPC, "dc2") @@ -121,15 +139,27 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { } func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) { + t.Run("without-tls", func(t *testing.T) { + testLeader_PeeringSync_Lifecycle_ServerDeletion(t, false) + }) + t.Run("with-tls", func(t *testing.T) { + testLeader_PeeringSync_Lifecycle_ServerDeletion(t, true) + }) +} +func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS bool) { if testing.Short() { t.Skip("too slow for testing.Short") } - // TODO(peering): Configure with TLS _, s1 := testServerWithConfig(t, func(c *Config) { - c.NodeName = "s1.dc1" + c.NodeName = "bob" c.Datacenter = "dc1" c.TLSConfig.Domain = "consul" + if enableTLS { + c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt" + c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt" + c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key" + } }) testrpc.WaitForLeader(t, s1.RPC, "dc1") @@ -165,9 +195,14 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) { // Bring up s2 and store s1's token so that it attempts to dial. _, s2 := testServerWithConfig(t, func(c *Config) { - c.NodeName = "s2.dc2" + c.NodeName = "betty" c.Datacenter = "dc2" c.PrimaryDatacenter = "dc2" + if enableTLS { + c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt" + c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt" + c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key" + } }) testrpc.WaitForLeader(t, s2.RPC, "dc2") @@ -216,6 +251,111 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) { }) } +func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Run("server-name-validation", func(t *testing.T) { + testLeader_PeeringSync_failsForTLSError(t, func(p *pbpeering.Peering) { + p.PeerServerName = "wrong.name" + }, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.consul, bob.server.dc1.consul, not wrong.name`) + }) + t.Run("bad-ca-roots", func(t *testing.T) { + wrongRoot, err := ioutil.ReadFile("../../test/client_certs/rootca.crt") + require.NoError(t, err) + + testLeader_PeeringSync_failsForTLSError(t, func(p *pbpeering.Peering) { + p.PeerCAPems = []string{string(wrongRoot)} + }, `transport: authentication handshake failed: x509: certificate signed by unknown authority`) + }) +} + +func testLeader_PeeringSync_failsForTLSError(t *testing.T, peerMutateFn func(p *pbpeering.Peering), expectErr string) { + require.NotNil(t, peerMutateFn) + + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "bob" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + + c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt" + c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt" + c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + 
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + // S1 should not have a stream tracked for dc2 because s1 generated a token + // for baz, and therefore needs to wait to be dialed. + time.Sleep(1 * time.Second) + _, found := s1.peerStreamServer.StreamStatus(token.PeerID) + require.False(t, found) + + var ( + s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d" + ) + + // Bring up s2 and store s1's token so that it attempts to dial. + _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "betty" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + + c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt" + c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt" + c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1. + p := &pbpeering.Peering{ + ID: s2PeerID, + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + peerMutateFn(p) + require.True(t, p.ShouldDial()) + + // We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store. + require.NoError(t, s2.fsm.State().PeeringWrite(1000, p)) + + retry.Run(t, func(r *retry.R) { + status, found := s2.peerStreamTracker.StreamStatus(p.ID) + require.True(r, found) + require.False(r, status.Connected) + require.Contains(r, status.LastSendErrorMessage, expectErr) + }) +} + func TestLeader_Peering_DeferredDeletion(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/consul/peering_backend.go b/agent/consul/peering_backend.go index 4014bbdd2..589b4e95b 100644 --- a/agent/consul/peering_backend.go +++ b/agent/consul/peering_backend.go @@ -52,7 +52,7 @@ func (b *PeeringBackend) GetLeaderAddress() string { // GetAgentCACertificates gets the server's raw CA data from its TLS Configurator. func (b *PeeringBackend) GetAgentCACertificates() ([]string, error) { // TODO(peering): handle empty CA pems - return b.srv.tlsConfigurator.ManualCAPems(), nil + return b.srv.tlsConfigurator.GRPCManualCAPems(), nil } // GetServerAddresses looks up server node addresses from the state store. 
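The change above makes the peering token carry the CA material configured for the external gRPC
listener (`GRPCManualCAPems`) rather than the internal RPC CA. The following sketch is only an
illustration of how a dialing cluster can consume that material; it is not the exact Consul code
path (the real one lives in `establishStream` in leader_peering.go). It shows CA PEMs and a server
name, as carried in a peering token, being turned into gRPC transport credentials:

```go
package peeringdial // illustrative sketch only

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialPeer turns the CA PEMs and server name from a peering token into TLS
// transport credentials for the accepting cluster's external gRPC port.
func dialPeer(ctx context.Context, addr, serverName string, caPEMs []string) (*grpc.ClientConn, error) {
	pool := x509.NewCertPool()
	for _, pem := range caPEMs {
		if !pool.AppendCertsFromPEM([]byte(pem)) {
			return nil, fmt.Errorf("failed to parse CA PEM")
		}
	}
	creds := credentials.NewTLS(&tls.Config{
		RootCAs:    pool,       // wrong roots fail with an unknown-authority error
		ServerName: serverName, // a mismatch fails hostname verification
	})
	// As in the patch above, grpc.WithBlock is intentionally not used on this path.
	return grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(creds))
}
```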
diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 77f761f68..b9f9cc4f1 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/consul/agent/connect" + external "github.com/hashicorp/consul/agent/grpc-external" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/structs" @@ -299,8 +300,7 @@ func newServerWithDeps(t *testing.T, c *Config, deps Deps) (*Server, error) { } } - srv, err := NewServer(c, deps, grpc.NewServer()) - + srv, err := NewServer(c, deps, external.NewServer(deps.Logger.Named("grpc.external"), deps.TLSConfigurator)) if err != nil { return nil, err } diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index 4d4c8746c..4244bbe09 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -33,16 +33,37 @@ func (t *Tracker) SetClock(clock func() time.Time) { } } +// Register a stream for a given peer but do not mark it as connected. +func (t *Tracker) Register(id string) (*MutableStatus, error) { + t.mu.Lock() + defer t.mu.Unlock() + status, _, err := t.registerLocked(id, false) + return status, err +} + +func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) { + status, ok := t.streams[id] + if !ok { + status = newMutableStatus(t.timeNow, initAsConnected) + t.streams[id] = status + return status, true, nil + } + return status, false, nil +} + // Connected registers a stream for a given peer, and marks it as connected. // It also enforces that there is only one active stream for a peer. 
func (t *Tracker) Connected(id string) (*MutableStatus, error) { t.mu.Lock() defer t.mu.Unlock() + return t.connectedLocked(id) +} - status, ok := t.streams[id] - if !ok { - status = newMutableStatus(t.timeNow) - t.streams[id] = status +func (t *Tracker) connectedLocked(id string) (*MutableStatus, error) { + status, newlyRegistered, err := t.registerLocked(id, true) + if err != nil { + return nil, err + } else if newlyRegistered { return status, nil } @@ -150,10 +171,10 @@ type Status struct { ImportedServices map[string]struct{} } -func newMutableStatus(now func() time.Time) *MutableStatus { +func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { return &MutableStatus{ Status: Status{ - Connected: true, + Connected: connected, }, timeNow: now, doneCh: make(chan struct{}), diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 6a8f32915..e4ab2947a 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -59,7 +59,7 @@ func TestPeeringService_GenerateToken(t *testing.T) { // TODO(peering): see note on newTestServer, refactor to not use this s := newTestServer(t, func(c *consul.Config) { c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.1" - c.TLSConfig.InternalRPC.CAFile = cafile + c.TLSConfig.GRPC.CAFile = cafile c.DataDir = dir }) client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) diff --git a/test/integration/connect/envoy/case-wanfed-gw/primary/common.hcl b/test/integration/connect/envoy/case-wanfed-gw/primary/common.hcl index c8c086fb4..d586b280b 100644 --- a/test/integration/connect/envoy/case-wanfed-gw/primary/common.hcl +++ b/test/integration/connect/envoy/case-wanfed-gw/primary/common.hcl @@ -1,6 +1,10 @@ -ca_file = "/workdir/primary/tls/consul-agent-ca.pem" -cert_file = "/workdir/primary/tls/primary-server-consul-0.pem" -key_file = "/workdir/primary/tls/primary-server-consul-0-key.pem" -verify_incoming = true -verify_outgoing = true -verify_server_hostname = true +tls { + internal_rpc { + ca_file = "/workdir/primary/tls/consul-agent-ca.pem" + cert_file = "/workdir/primary/tls/primary-server-consul-0.pem" + key_file = "/workdir/primary/tls/primary-server-consul-0-key.pem" + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + } +} diff --git a/test/integration/connect/envoy/case-wanfed-gw/primary/server.hcl b/test/integration/connect/envoy/case-wanfed-gw/primary/server.hcl index f3c0bc000..8dd5b39dc 100644 --- a/test/integration/connect/envoy/case-wanfed-gw/primary/server.hcl +++ b/test/integration/connect/envoy/case-wanfed-gw/primary/server.hcl @@ -3,9 +3,13 @@ connect { enabled = true enable_mesh_gateway_wan_federation = true } -ca_file = "/workdir/primary/tls/consul-agent-ca.pem" -cert_file = "/workdir/primary/tls/primary-server-consul-0.pem" -key_file = "/workdir/primary/tls/primary-server-consul-0-key.pem" -verify_incoming = true -verify_outgoing = true -verify_server_hostname = true +tls { + internal_rpc { + ca_file = "/workdir/primary/tls/consul-agent-ca.pem" + cert_file = "/workdir/primary/tls/primary-server-consul-0.pem" + key_file = "/workdir/primary/tls/primary-server-consul-0-key.pem" + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + } +} diff --git a/test/integration/connect/envoy/case-wanfed-gw/secondary/common.hcl b/test/integration/connect/envoy/case-wanfed-gw/secondary/common.hcl index 36bfc41c3..8e536a87a 100644 --- a/test/integration/connect/envoy/case-wanfed-gw/secondary/common.hcl +++ 
b/test/integration/connect/envoy/case-wanfed-gw/secondary/common.hcl @@ -1,6 +1,10 @@ -ca_file = "/workdir/secondary/tls/consul-agent-ca.pem" -cert_file = "/workdir/secondary/tls/secondary-server-consul-0.pem" -key_file = "/workdir/secondary/tls/secondary-server-consul-0-key.pem" -verify_incoming = true -verify_outgoing = true -verify_server_hostname = true +tls { + internal_rpc { + ca_file = "/workdir/secondary/tls/consul-agent-ca.pem" + cert_file = "/workdir/secondary/tls/secondary-server-consul-0.pem" + key_file = "/workdir/secondary/tls/secondary-server-consul-0-key.pem" + verify_incoming = true + verify_outgoing = true + verify_server_hostname = true + } +} diff --git a/tlsutil/config.go b/tlsutil/config.go index da85c2e72..7c9e6d2ad 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -88,7 +88,7 @@ type ProtocolConfig struct { // certificate authority. This is used to verify authenticity of server // nodes. // - // Note: this setting doesn't apply to the gRPC configuration, as Consul + // Note: this setting doesn't apply to the external gRPC configuration, as Consul // makes no outgoing connections using this protocol. VerifyOutgoing bool @@ -233,6 +233,13 @@ func (c *Configurator) ManualCAPems() []string { return c.internalRPC.manualCAPEMs } +// GRPCManualCAPems returns the currently loaded CAs for the gRPC in PEM format. +func (c *Configurator) GRPCManualCAPems() []string { + c.lock.RLock() + defer c.lock.RUnlock() + return c.grpc.manualCAPEMs +} + // Update updates the internal configuration which is used to generate // *tls.Config. // This function acquires a write lock because it writes the new config. diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 5fb0c849c..4506d80ff 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -1998,8 +1998,6 @@ specially crafted certificate signed by the CA can be used to gain full access t - `grpc` ((#tls_grpc)) Provides settings for the gRPC/xDS interface. To enable the gRPC interface you must define a port via [`ports.grpc`](#grpc_port). - To enable TLS on the gRPC interface you also must define an HTTPS port via - [`ports.https`](#https_port). - `ca_file` ((#tls_grpc_ca_file)) Overrides [`tls.defaults.ca_file`](#tls_defaults_ca_file). diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index a9a72c3f9..ec0cf54d5 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -16,6 +16,18 @@ upgrade flow. ## Consul 1.13.0 +### gRPC TLS + +In prior Consul versions if HTTPS was enabled for the client API and exposed +via `ports { https = NUMBER }` then the same TLS material was used to encrypt +the gRPC port used for xDS. Now this is decoupled and activating TLS on the +gRPC endpoint is controlled solely with the gRPC section of the new +[`tls` stanza](/docs/agent/config/config-files#tls-configuration-reference). + +If you have not yet switched to the new `tls` stanza and were NOT using HTTPS +for the API then updating to Consul 1.13 will activate TLS for gRPC since the +deprecated TLS settings are used as defaults. + ### 1.9 Telemetry Compatibility #### Removing configuration options From 743cecc559139d4008ff5f8cbdd16ffa72acae43 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 15 Jul 2022 13:24:22 -0500 Subject: [PATCH 021/107] test: fix flaky test TestAPI_CatalogNodes (#13779) --- api/catalog_test.go | 58 +++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 33 deletions(-) diff --git a/api/catalog_test.go b/api/catalog_test.go index 2639be6f4..2c926d199 100644 --- a/api/catalog_test.go +++ b/api/catalog_test.go @@ -36,43 +36,35 @@ func TestAPI_CatalogNodes(t *testing.T) { s.WaitForSerfCheck(t) catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { nodes, meta, err := catalog.Nodes(nil) - // We're not concerned about the createIndex of an agent - // Hence we're setting it to the default value - nodes[0].CreateIndex = 0 - if err != nil { - r.Fatal(err) - } - if meta.LastIndex < 2 { - r.Fatal("Last index must be greater than 1") - } - want := []*Node{ - { - ID: s.Config.NodeID, - Node: s.Config.NodeName, - Partition: splitDefaultPartition, - Address: "127.0.0.1", - Datacenter: "dc1", - TaggedAddresses: map[string]string{ - "lan": "127.0.0.1", - "lan_ipv4": "127.0.0.1", - "wan": "127.0.0.1", - "wan_ipv4": "127.0.0.1", - }, - Meta: map[string]string{ - "consul-network-segment": "", - }, - // CreateIndex will never always be meta.LastIndex - 1 - // The purpose of this test is not to test CreateIndex value of an agent - // rather to check if the client agent can get the correct number - // of agents with a particular service, KV pair, etc... - // Hence reverting this to the default value here. - CreateIndex: 0, - ModifyIndex: meta.LastIndex, + require.NoError(r, err) + require.Len(r, nodes, 1) + require.True(r, meta.LastIndex >= 1, "Last index must be greater than 1") + + // The raft indexes are not relevant for this test. + got := nodes[0] + got.CreateIndex = 0 + got.ModifyIndex = 0 + + want := &Node{ + ID: s.Config.NodeID, + Node: s.Config.NodeName, + Partition: splitDefaultPartition, + Address: "127.0.0.1", + Datacenter: "dc1", + TaggedAddresses: map[string]string{ + "lan": "127.0.0.1", + "lan_ipv4": "127.0.0.1", + "wan": "127.0.0.1", + "wan_ipv4": "127.0.0.1", + }, + Meta: map[string]string{ + "consul-network-segment": "", }, } - require.Equal(r, want, nodes) + require.Equal(r, want, got) }) } From a8721c33c5cd785446421eb66cd981c7fbcc6665 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Fri, 15 Jul 2022 11:58:33 -0700 Subject: [PATCH 022/107] peerstream: dialer should reconnect when stream closes (#13745) * peerstream: dialer should reconnect when stream closes If the stream is closed unexpectedly (i.e. when we haven't received a terminated message), the dialer should attempt to re-establish the stream. Previously, the `HandleStream` would return `nil` when the stream was closed. The caller then assumed the stream was terminated on purpose and so didn't reconnect when instead it was stopped unexpectedly and the dialer should have attempted to reconnect. 
--- agent/consul/leader_peering_test.go | 115 ++++++++++++++++++ .../services/peerstream/stream_resources.go | 9 +- 2 files changed, 122 insertions(+), 2 deletions(-) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 33ef26d61..aa720bd6b 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" @@ -423,6 +424,120 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) { }) } +// Test that the dialing peer attempts to reestablish connections when the accepting peer +// shuts down without sending a Terminated message. +// +// To test this, we start the two peer servers (accepting and dialing), set up peering, and then shut down +// the accepting peer. This terminates the connection without sending a Terminated message. +// We then restart the accepting peer (we actually spin up a new server with the same config and port) and then +// assert that the dialing peer reestablishes the connection. +func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // Reserve a gRPC port so we can restart the accepting server with the same port. + ports := freeport.GetN(t, 1) + acceptingServerPort := ports[0] + + _, acceptingServer := testServerWithConfig(t, func(c *Config) { + c.NodeName = "acceptingServer.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + c.GRPCPort = acceptingServerPort + }) + testrpc.WaitForLeader(t, acceptingServer.RPC, "dc1") + + // Create a peering by generating a token. + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, acceptingServer.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(acceptingServer.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-dialing-server", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + var ( + dialingServerPeerID = token.PeerID + acceptingServerPeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d" + ) + + // Bring up dialingServer and store acceptingServer's token so that it attempts to dial. + _, dialingServer := testServerWithConfig(t, func(c *Config) { + c.NodeName = "dialing-server.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, dialingServer.RPC, "dc2") + p := &pbpeering.Peering{ + ID: acceptingServerPeerID, + Name: "my-peer-accepting-server", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + require.NoError(t, dialingServer.fsm.State().PeeringWrite(1000, p)) + + // Wait for the stream to be connected. 
+ retry.Run(t, func(r *retry.R) { + status, found := dialingServer.peerStreamServer.StreamStatus(p.ID) + require.True(r, found) + require.True(r, status.Connected) + }) + + // Wait until the dialing server has sent its roots over. This avoids a race condition where the accepting server + // shuts down, but the dialing server is still sending messages to the stream. When this happens, an error is raised + // which causes the stream to restart. + // In this test, we want to test what happens when the stream is closed when there are _no_ messages being sent. + retry.Run(t, func(r *retry.R) { + _, bundle, err := acceptingServer.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: "my-peer-dialing-server"}) + require.NoError(r, err) + require.NotNil(r, bundle) + }) + + // Shutdown the accepting server. + require.NoError(t, acceptingServer.Shutdown()) + // Have to manually shut down the gRPC server otherwise it stays bound to the port. + acceptingServer.externalGRPCServer.Stop() + + // Mimic the server restarting by starting a new server with the same config. + _, acceptingServerRestart := testServerWithConfig(t, func(c *Config) { + c.NodeName = "acceptingServer.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + c.GRPCPort = acceptingServerPort + }) + testrpc.WaitForLeader(t, acceptingServerRestart.RPC, "dc1") + + // Re-insert the peering state. + require.NoError(t, acceptingServerRestart.fsm.State().PeeringWrite(2000, &pbpeering.Peering{ + ID: dialingServerPeerID, + Name: "my-peer-dialing-server", + State: pbpeering.PeeringState_PENDING, + })) + + // The dialing peer should eventually reconnect. + retry.Run(t, func(r *retry.R) { + connStreams := acceptingServerRestart.peerStreamServer.ConnectedStreams() + require.Contains(r, connStreams, dialingServerPeerID) + }) +} + func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastIdx uint64) uint64 { lastIdx++ require.NoError(t, store.PeeringTrustBundleWrite(lastIdx, &pbpeering.PeeringTrustBundle{ diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index eabd01141..57bc350c0 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -258,8 +258,13 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { case msg, open := <-recvChan: if !open { - logger.Trace("no longer receiving data on the stream") - return nil + // The only time we expect the stream to end is when we've received a "Terminated" message. + // We handle the case of receiving the Terminated message below and then this function exits. + // So if the channel is closed while this function is still running then we haven't received a Terminated + // message which means we want to try and reestablish the stream. + // It's the responsibility of the caller of this function to reestablish the stream on error and so that's + // why we return an error here. + return fmt.Errorf("stream ended unexpectedly") } // NOTE: this code should have similar error handling to the From 3968f21339c0f8a77bb504ad45988f3d5e58ddda Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Fri, 15 Jul 2022 12:23:05 -0700 Subject: [PATCH 023/107] Add docs for peerStreamServer vs peeringServer. 
(#13781) --- agent/consul/server.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/agent/consul/server.go b/agent/consul/server.go index a5708e030..d4753bb3f 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -367,8 +367,9 @@ type Server struct { // peeringBackend is shared between the external and internal gRPC services for peering peeringBackend *PeeringBackend - // peerStreamServer is a server used to handle peering streams - peerStreamServer *peerstream.Server + // peerStreamServer is a server used to handle peering streams from external clusters. + peerStreamServer *peerstream.Server + // peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens. peeringServer *peering.Server peerStreamTracker *peerstream.Tracker From 7da65c02a6425477c37dfa92d6a937b126f9af05 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 15 Jul 2022 14:43:24 -0500 Subject: [PATCH 024/107] peerstream: fix test assertions (#13780) --- .../services/peerstream/stream_test.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 612513158..1074f5960 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -469,13 +469,17 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { lastRecvSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() + api := structs.NewServiceName("api", nil) + expect := Status{ Connected: true, LastAck: lastSendSuccess, LastNack: lastNack, LastNackMessage: lastNackMsg, LastReceiveSuccess: lastRecvSuccess, - ImportedServices: map[string]struct{}{"api": {}}, + ImportedServices: map[string]struct{}{ + api.String(): {}, + }, } retry.Run(t, func(r *retry.R) { @@ -525,6 +529,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { lastRecvError = it.base.Add(time.Duration(sequence) * time.Second).UTC() lastRecvErrorMsg = `unsupported operation: "OPERATION_UNSPECIFIED"` + api := structs.NewServiceName("api", nil) + expect := Status{ Connected: true, LastAck: lastSendSuccess, @@ -533,7 +539,9 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { LastReceiveSuccess: lastRecvSuccess, LastReceiveError: lastRecvError, LastReceiveErrorMessage: lastRecvErrorMsg, - ImportedServices: map[string]struct{}{"api": {}}, + ImportedServices: map[string]struct{}{ + api.String(): {}, + }, } retry.Run(t, func(r *retry.R) { @@ -552,6 +560,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { sequence++ disconnectTime := it.base.Add(time.Duration(sequence) * time.Second).UTC() + api := structs.NewServiceName("api", nil) + expect := Status{ Connected: false, LastAck: lastSendSuccess, @@ -561,7 +571,9 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { LastReceiveSuccess: lastRecvSuccess, LastReceiveErrorMessage: io.EOF.Error(), LastReceiveError: lastRecvError, - ImportedServices: map[string]struct{}{"api": {}}, + ImportedServices: map[string]struct{}{ + api.String(): {}, + }, } retry.Run(t, func(r *retry.R) { From bec4df0679ac6d5f00199a8276b3c6ebcef318c4 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 15 Jul 2022 15:03:40 -0500 Subject: [PATCH 025/107] peerstream: require a resource subscription to receive updates of that type (#13767) This mimics xDS's discovery protocol where you must request a resource explicitly for the exporting side to send those events to you. As part of this I aligned the overall ResourceURL with the TypeURL that gets embedded into the encoded protobuf Any construct. The CheckServiceNodes is now wrapped in a better named "ExportedService" struct now. --- .../services/peerstream/replication.go | 51 +-- .../services/peerstream/stream_resources.go | 196 +++++++++-- .../services/peerstream/stream_test.go | 304 ++++++++++-------- .../peerstream/subscription_blocking.go | 14 + .../peerstream/subscription_manager.go | 37 ++- .../peerstream/subscription_manager_test.go | 23 +- .../services/peerstream/testing.go | 38 ++- proto/pbpeerstream/convert.go | 25 ++ proto/pbpeerstream/peerstream.pb.binary.go | 10 + proto/pbpeerstream/peerstream.pb.go | 289 ++++++++++------- proto/pbpeerstream/peerstream.proto | 6 + proto/pbpeerstream/types.go | 12 +- proto/pbservice/convert.go | 19 -- proto/prototest/testing.go | 57 ++++ 14 files changed, 736 insertions(+), 345 deletions(-) create mode 100644 proto/pbpeerstream/convert.go diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go index e21b48a63..c69d705d3 100644 --- a/agent/grpc-external/services/peerstream/replication.go +++ b/agent/grpc-external/services/peerstream/replication.go @@ -5,10 +5,9 @@ import ( "fmt" "strings" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" "github.com/hashicorp/go-hclog" "google.golang.org/genproto/googleapis/rpc/code" + newproto "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/agent/cache" @@ -39,7 +38,16 @@ func makeServiceResponse( logger hclog.Logger, update cache.UpdateEvent, ) (*pbpeerstream.ReplicationMessage_Response, error) { - any, csn, err := marshalToProtoAny[*pbservice.IndexedCheckServiceNodes](update.Result) + csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + if !ok { + return nil, fmt.Errorf("invalid type for service response: %T", update.Result) + } + + export := &pbpeerstream.ExportedService{ + Nodes: csn.Nodes, + } + + any, err := anypb.New(export) if err != nil { return nil, fmt.Errorf("failed to marshal: %w", err) } @@ -53,9 +61,9 @@ func makeServiceResponse( // // We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that. // Case #1 is a no-op for the importing peer. - if len(csn.Nodes) == 0 { + if len(export.Nodes) == 0 { return &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, // TODO(peering): Nonce management Nonce: "", ResourceID: serviceName, @@ -65,7 +73,7 @@ func makeServiceResponse( // If there are nodes in the response, we push them as an UPSERT operation. 
return &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, // TODO(peering): Nonce management Nonce: "", ResourceID: serviceName, @@ -84,7 +92,7 @@ func makeCARootsResponse( } return &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLRoots, + ResourceURL: pbpeerstream.TypeURLPeeringTrustBundle, // TODO(peering): Nonce management Nonce: "", ResourceID: "roots", @@ -97,13 +105,13 @@ func makeCARootsResponse( // the protobuf.Any type, the asserted T type, and any errors // during marshalling or type assertion. // `in` MUST be of type T or it returns an error. -func marshalToProtoAny[T proto.Message](in any) (*anypb.Any, T, error) { +func marshalToProtoAny[T newproto.Message](in any) (*anypb.Any, T, error) { typ, ok := in.(T) if !ok { var outType T return nil, typ, fmt.Errorf("input type is not %T: %T", outType, in) } - any, err := ptypes.MarshalAny(typ) + any, err := anypb.New(typ) if err != nil { return nil, typ, err } @@ -186,20 +194,23 @@ func (s *Server) handleUpsert( resource *anypb.Any, logger hclog.Logger, ) error { + if resource.TypeUrl != resourceURL { + return fmt.Errorf("mismatched resourceURL %q and Any typeUrl %q", resourceURL, resource.TypeUrl) + } + switch resourceURL { - case pbpeerstream.TypeURLService: + case pbpeerstream.TypeURLExportedService: sn := structs.ServiceNameFromString(resourceID) sn.OverridePartition(partition) - csn := &pbservice.IndexedCheckServiceNodes{} - if err := ptypes.UnmarshalAny(resource, csn); err != nil { + export := &pbpeerstream.ExportedService{} + if err := resource.UnmarshalTo(export); err != nil { return fmt.Errorf("failed to unmarshal resource: %w", err) } - err := s.handleUpdateService(peerName, partition, sn, csn) + err := s.handleUpdateService(peerName, partition, sn, export) if err != nil { - logger.Error("did not increment imported services count", "service_name", sn.String(), "error", err) - return err + return fmt.Errorf("did not increment imported services count for service=%q: %w", sn.String(), err) } logger.Trace("incrementing imported services count", "service_name", sn.String()) @@ -207,9 +218,9 @@ func (s *Server) handleUpsert( return nil - case pbpeerstream.TypeURLRoots: + case pbpeerstream.TypeURLPeeringTrustBundle: roots := &pbpeering.PeeringTrustBundle{} - if err := ptypes.UnmarshalAny(resource, roots); err != nil { + if err := resource.UnmarshalTo(roots); err != nil { return fmt.Errorf("failed to unmarshal resource: %w", err) } @@ -232,7 +243,7 @@ func (s *Server) handleUpdateService( peerName string, partition string, sn structs.ServiceName, - pbNodes *pbservice.IndexedCheckServiceNodes, + export *pbpeerstream.ExportedService, ) error { // Capture instances in the state store for reconciliation later. 
_, storedInstances, err := s.GetStore().CheckServiceNodes(nil, sn.Name, &sn.EnterpriseMeta, peerName) @@ -240,7 +251,7 @@ func (s *Server) handleUpdateService( return fmt.Errorf("failed to read imported services: %w", err) } - structsNodes, err := pbNodes.CheckServiceNodesToStruct() + structsNodes, err := export.CheckServiceNodesToStruct() if err != nil { return fmt.Errorf("failed to convert protobuf instances to structs: %w", err) } @@ -444,7 +455,7 @@ func (s *Server) handleDelete( logger hclog.Logger, ) error { switch resourceURL { - case pbpeerstream.TypeURLService: + case pbpeerstream.TypeURLExportedService: sn := structs.ServiceNameFromString(resourceID) sn.OverridePartition(partition) diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 57bc350c0..26d5a7b00 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -9,7 +9,6 @@ import ( "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" "github.com/hashicorp/go-hclog" - "google.golang.org/genproto/googleapis/rpc/code" "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" @@ -99,11 +98,12 @@ func (s *Server) StreamResources(stream pbpeerstream.PeerStreamService_StreamRes } streamReq := HandleStreamRequest{ - LocalID: p.ID, - RemoteID: "", - PeerName: p.Name, - Partition: p.Partition, - Stream: stream, + LocalID: p.ID, + RemoteID: "", + PeerName: p.Name, + Partition: p.Partition, + InitialResourceURL: req.ResourceURL, + Stream: stream, } err = s.HandleStream(streamReq) // A nil error indicates that the peering was deleted and the stream needs to be gracefully shutdown. @@ -129,6 +129,9 @@ type HandleStreamRequest struct { // Partition is the local partition associated with the peer. Partition string + // InitialResourceURL is the ResourceURL from the initial Request. + InitialResourceURL string + // Stream is the open stream to the peer cluster. Stream BidirectionalStream } @@ -183,6 +186,13 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { } } + remoteSubTracker := newResourceSubscriptionTracker() + if streamReq.InitialResourceURL != "" { + if remoteSubTracker.Subscribe(streamReq.InitialResourceURL) { + logger.Info("subscribing to resource type", "resourceURL", streamReq.InitialResourceURL) + } + } + mgr := newSubscriptionManager( streamReq.Stream.Context(), logger, @@ -190,24 +200,31 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { trustDomain, s.Backend, s.GetStore, + remoteSubTracker, ) subCh := mgr.subscribe(streamReq.Stream.Context(), streamReq.LocalID, streamReq.PeerName, streamReq.Partition) - sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, - PeerID: streamReq.RemoteID, - }) - logTraceSend(logger, sub) + // Subscribe to all relevant resource types. 
+ for _, resourceURL := range []string{ + pbpeerstream.TypeURLExportedService, + pbpeerstream.TypeURLPeeringTrustBundle, + } { + sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{ + ResourceURL: resourceURL, + PeerID: streamReq.RemoteID, + }) + logTraceSend(logger, sub) - if err := streamReq.Stream.Send(sub); err != nil { - if err == io.EOF { - logger.Info("stream ended by peer") - status.TrackReceiveError(err.Error()) - return nil + if err := streamReq.Stream.Send(sub); err != nil { + if err == io.EOF { + logger.Info("stream ended by peer") + status.TrackReceiveError(err.Error()) + return nil + } + // TODO(peering) Test error handling in calls to Send/Recv + status.TrackSendError(err.Error()) + return fmt.Errorf("failed to send subscription for %q to stream: %w", resourceURL, err) } - // TODO(peering) Test error handling in calls to Send/Recv - status.TrackSendError(err.Error()) - return fmt.Errorf("failed to send to stream: %v", err) } // TODO(peering): Should this be buffered? @@ -289,17 +306,86 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { if !pbpeerstream.KnownTypeURL(req.ResourceURL) { return grpcstatus.Errorf(codes.InvalidArgument, "subscription request to unknown resource URL: %s", req.ResourceURL) } - switch { - case req.ResponseNonce == "": - // TODO(peering): This can happen on a client peer since they don't try to receive subscriptions before entering HandleStream. - // Should change that behavior or only allow it that one time. - case req.Error != nil && (req.Error.Code != int32(code.Code_OK) || req.Error.Message != ""): + // There are different formats of requests depending upon where in the stream lifecycle we are. + // + // 1. Initial Request: This is the first request being received + // FROM the establishing peer. This is handled specially in + // (*Server).StreamResources BEFORE calling + // (*Server).HandleStream. This takes care of determining what + // the PeerID is for the stream. This is ALSO treated as (2) below. + // + // 2. Subscription Request: This is the first request for a + // given ResourceURL within a stream. The Initial Request (1) + // is always one of these as well. + // + // These must contain a valid ResourceURL with no Error or + // ResponseNonce set. + // + // It is valid to subscribe to the same ResourceURL twice + // within the lifetime of a stream, but all duplicate + // subscriptions are treated as no-ops upon receipt. + // + // 3. ACK Request: This is the message sent in reaction to an + // earlier Response to indicate that the response was processed + // by the other side successfully. + // + // These must contain a ResponseNonce and no Error. + // + // 4. NACK Request: This is the message sent in reaction to an + // earlier Response to indicate that the response was NOT + // processed by the other side successfully. + // + // These must contain a ResponseNonce and an Error. + // + if !remoteSubTracker.IsSubscribed(req.ResourceURL) { + // This must be a new subscription request to add a new + // resource type, vet it like a new request. + + if !streamReq.WasDialed() { + if req.PeerID != "" && req.PeerID != streamReq.RemoteID { + // Not necessary after the first request from the dialer, + // but if provided must match. 
+ return grpcstatus.Errorf(codes.InvalidArgument, + "initial subscription requests for a resource type must have consistent PeerID values: got=%q expected=%q", + req.PeerID, + streamReq.RemoteID, + ) + } + } + if req.ResponseNonce != "" { + return grpcstatus.Error(codes.InvalidArgument, "initial subscription requests for a resource type must not contain a nonce") + } + if req.Error != nil { + return grpcstatus.Error(codes.InvalidArgument, "initial subscription request for a resource type must not contain an error") + } + + if remoteSubTracker.Subscribe(req.ResourceURL) { + logger.Info("subscribing to resource type", "resourceURL", req.ResourceURL) + } + status.TrackAck() + continue + } + + // At this point we have a valid ResourceURL and we are subscribed to it. + + switch { + case req.ResponseNonce == "" && req.Error != nil: + return grpcstatus.Error(codes.InvalidArgument, "initial subscription request for a resource type must not contain an error") + + case req.ResponseNonce != "" && req.Error == nil: // ACK + // TODO(peering): handle ACK fully + status.TrackAck() + + case req.ResponseNonce != "" && req.Error != nil: // NACK + // TODO(peering): handle NACK fully logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message) status.TrackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message)) default: - status.TrackAck() + // This branch might be dead code, but it could also happen + // during a stray 're-subscribe' so just ignore the + // message. } continue @@ -425,3 +511,63 @@ func logTraceProto(logger hclog.Logger, pb proto.Message, received bool) { logger.Trace("replication message", "direction", dir, "protobuf", out) } + +// resourceSubscriptionTracker is used to keep track of the ResourceURLs that a +// stream has subscribed to and can notify you when a subscription comes in by +// closing the channels returned by SubscribedChan. +type resourceSubscriptionTracker struct { + // notifierMap keeps track of a notification channel for each resourceURL. + // Keys may exist in here even when they do not exist in 'subscribed' as + // calling SubscribedChan has to possibly create and and hand out a + // notification channel in advance of any notification. + notifierMap map[string]chan struct{} + + // subscribed is a set that keeps track of resourceURLs that are currently + // subscribed to. Keys are never deleted. If a key is present in this map + // it is also present in 'notifierMap'. + subscribed map[string]struct{} +} + +func newResourceSubscriptionTracker() *resourceSubscriptionTracker { + return &resourceSubscriptionTracker{ + subscribed: make(map[string]struct{}), + notifierMap: make(map[string]chan struct{}), + } +} + +// IsSubscribed returns true if the given ResourceURL has an active subscription. +func (t *resourceSubscriptionTracker) IsSubscribed(resourceURL string) bool { + _, ok := t.subscribed[resourceURL] + return ok +} + +// Subscribe subscribes to the given ResourceURL. It will return true if this +// was the FIRST time a subscription occurred. It will also close the +// notification channel associated with this ResourceURL. 
+func (t *resourceSubscriptionTracker) Subscribe(resourceURL string) bool { + if _, ok := t.subscribed[resourceURL]; ok { + return false + } + t.subscribed[resourceURL] = struct{}{} + + // and notify + ch := t.ensureNotifierChan(resourceURL) + close(ch) + + return true +} + +// SubscribedChan returns a channel that will be closed when the ResourceURL is +// subscribed using the Subscribe method. +func (t *resourceSubscriptionTracker) SubscribedChan(resourceURL string) <-chan struct{} { + return t.ensureNotifierChan(resourceURL) +} + +func (t *resourceSubscriptionTracker) ensureNotifierChan(resourceURL string) chan struct{} { + if ch, ok := t.notifierMap[resourceURL]; ok { + return ch + } + ch := make(chan struct{}) + t.notifierMap[resourceURL] = ch + return ch +} diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 1074f5960..1e3117ecc 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -12,15 +12,14 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" "google.golang.org/genproto/googleapis/rpc/code" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" @@ -97,18 +96,6 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) { backend.leaderAddr = "expected:address" }) - client := NewMockClient(context.Background()) - - errCh := make(chan error, 1) - client.ErrCh = errCh - - go func() { - err := srv.StreamResources(client.ReplicationStream) - if err != nil { - errCh <- err - } - }() - p := writePeeringToBeDialed(t, store, 1, "my-peer") require.Empty(t, p.PeerID, "should be empty if being dialed") peerID := p.ID @@ -116,53 +103,73 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) { // Set the initial roots and CA configuration. _, _ = writeInitialRootsAndCA(t, store) - // Receive a subscription from a peer - sub := &pbpeerstream.ReplicationMessage{ - Payload: &pbpeerstream.ReplicationMessage_Request_{ - Request: &pbpeerstream.ReplicationMessage_Request{ - PeerID: peerID, - ResourceURL: pbpeerstream.TypeURLService, + client := NewMockClient(context.Background()) + + errCh := make(chan error, 1) + client.ErrCh = errCh + + go func() { + // Pass errors from server handler into ErrCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. + if err := srv.StreamResources(client.ReplicationStream); err != nil { + errCh <- err + } + }() + + // Receive a subscription from a peer. This message arrives while the + // server is a leader and should work. 
+ testutil.RunStep(t, "send subscription request to leader and consume its two requests", func(t *testing.T) { + sub := &pbpeerstream.ReplicationMessage{ + Payload: &pbpeerstream.ReplicationMessage_Request_{ + Request: &pbpeerstream.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeerstream.TypeURLExportedService, + }, }, - }, - } - err := client.Send(sub) - require.NoError(t, err) + } + err := client.Send(sub) + require.NoError(t, err) - msg, err := client.Recv() - require.NoError(t, err) - require.NotEmpty(t, msg) + msg1, err := client.Recv() + require.NoError(t, err) + require.NotEmpty(t, msg1) - receiveRoots, err := client.Recv() - require.NoError(t, err) - require.NotNil(t, receiveRoots.GetResponse()) - require.Equal(t, pbpeerstream.TypeURLRoots, receiveRoots.GetResponse().ResourceURL) + msg2, err := client.Recv() + require.NoError(t, err) + require.NotEmpty(t, msg2) + }) - input2 := &pbpeerstream.ReplicationMessage{ - Payload: &pbpeerstream.ReplicationMessage_Request_{ - Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, - ResponseNonce: "1", + // The ACK will be a new request but at this point the server is not the + // leader in the test and this should fail. + testutil.RunStep(t, "ack fails with non leader", func(t *testing.T) { + ack := &pbpeerstream.ReplicationMessage{ + Payload: &pbpeerstream.ReplicationMessage_Request_{ + Request: &pbpeerstream.ReplicationMessage_Request{ + ResourceURL: pbpeerstream.TypeURLExportedService, + ResponseNonce: "1", + }, }, - }, - } + } - err2 := client.Send(input2) - require.NoError(t, err2) + err := client.Send(ack) + require.NoError(t, err) - // expect error - msg2, err2 := client.Recv() - require.Nil(t, msg2) - require.Error(t, err2) - require.EqualError(t, err2, "rpc error: code = FailedPrecondition desc = node is not a leader anymore; cannot continue streaming") + // expect error + msg, err := client.Recv() + require.Nil(t, msg) + require.Error(t, err) + require.EqualError(t, err, "rpc error: code = FailedPrecondition desc = node is not a leader anymore; cannot continue streaming") - // expect a status error - st, ok := status.FromError(err2) - require.True(t, ok, "need to get back a grpc status error") - deets := st.Details() + // expect a status error + st, ok := status.FromError(err) + require.True(t, ok, "need to get back a grpc status error") - // expect a LeaderAddress message - exp := []interface{}{&pbpeerstream.LeaderAddress{Address: "expected:address"}} - prototest.AssertDeepEqual(t, exp, deets) + // expect a LeaderAddress message + expect := []interface{}{ + &pbpeerstream.LeaderAddress{Address: "expected:address"}, + } + prototest.AssertDeepEqual(t, expect, st.Details()) + }) } func TestStreamResources_Server_FirstRequest(t *testing.T) { @@ -204,7 +211,7 @@ func TestStreamResources_Server_FirstRequest(t *testing.T) { input: &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Response_{ Response: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResourceID: "api-service", Nonce: "2", }, @@ -251,7 +258,7 @@ func TestStreamResources_Server_FirstRequest(t *testing.T) { Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ PeerID: "63b60245-c475-426b-b314-4588d210859d", - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, }, }, }, @@ -291,7 +298,7 @@ func 
TestStreamResources_Server_Terminate(t *testing.T) { receiveRoots, err := client.Recv() require.NoError(t, err) require.NotNil(t, receiveRoots.GetResponse()) - require.Equal(t, pbpeerstream.TypeURLRoots, receiveRoots.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, receiveRoots.GetResponse().ResourceURL) testutil.RunStep(t, "new stream gets tracked", func(t *testing.T) { retry.Run(t, func(r *retry.R) { @@ -347,7 +354,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) }) - var sequence uint64 var lastSendSuccess time.Time testutil.RunStep(t, "ack tracked as success", func(t *testing.T) { @@ -355,18 +361,17 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ PeerID: peerID, - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "1", // Acks do not have an Error populated in the request }, }, } + + lastSendSuccess = it.FutureNow(1) err := client.Send(ack) require.NoError(t, err) - sequence++ - - lastSendSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() expect := Status{ Connected: true, @@ -388,7 +393,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ PeerID: peerID, - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "2", Error: &pbstatus.Status{ Code: int32(code.Code_UNAVAILABLE), @@ -397,12 +402,12 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, }, } + + lastNack = it.FutureNow(1) err := client.Send(nack) require.NoError(t, err) - sequence++ lastNackMsg = "client peer was unable to apply resource: bad bad not good" - lastNack = it.base.Add(time.Duration(sequence) * time.Second).UTC() expect := Status{ Connected: true, @@ -424,22 +429,22 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { resp := &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Response_{ Response: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResourceID: "api", Nonce: "21", Operation: pbpeerstream.Operation_OPERATION_UPSERT, - Resource: makeAnyPB(t, &pbservice.IndexedCheckServiceNodes{}), + Resource: makeAnyPB(t, &pbpeerstream.ExportedService{}), }, }, } + lastRecvSuccess = it.FutureNow(1) err := client.Send(resp) require.NoError(t, err) - sequence++ expectRoots := &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Response_{ Response: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLRoots, + ResourceURL: pbpeerstream.TypeURLPeeringTrustBundle, ResourceID: "roots", Resource: makeAnyPB(t, &pbpeering.PeeringTrustBundle{ TrustDomain: connect.TestTrustDomain, @@ -460,15 +465,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expectAck := &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "21", }, }, } prototest.AssertDeepEqual(t, expectAck, ack) - lastRecvSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() - api := structs.NewServiceName("api", nil) expect := Status{ @@ -496,7 +499,7 
@@ func TestStreamResources_Server_StreamTracker(t *testing.T) { resp := &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Response_{ Response: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResourceID: "web", Nonce: "24", @@ -505,9 +508,9 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, }, } + lastRecvError = it.FutureNow(1) err := client.Send(resp) require.NoError(t, err) - sequence++ ack, err := client.Recv() require.NoError(t, err) @@ -515,7 +518,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expectNack := &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "24", Error: &pbstatus.Status{ Code: int32(code.Code_INVALID_ARGUMENT), @@ -526,7 +529,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { } prototest.AssertDeepEqual(t, expectNack, ack) - lastRecvError = it.base.Add(time.Duration(sequence) * time.Second).UTC() lastRecvErrorMsg = `unsupported operation: "OPERATION_UNSPECIFIED"` api := structs.NewServiceName("api", nil) @@ -552,14 +554,12 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) testutil.RunStep(t, "client disconnect marks stream as disconnected", func(t *testing.T) { + lastRecvError = it.FutureNow(1) + disconnectTime := it.FutureNow(2) + lastRecvErrorMsg = io.EOF.Error() + client.Close() - sequence++ - lastRecvError := it.base.Add(time.Duration(sequence) * time.Second).UTC() - - sequence++ - disconnectTime := it.base.Add(time.Duration(sequence) * time.Second).UTC() - api := structs.NewServiceName("api", nil) expect := Status{ @@ -569,8 +569,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { LastNackMessage: lastNackMsg, DisconnectTime: disconnectTime, LastReceiveSuccess: lastRecvSuccess, - LastReceiveErrorMessage: io.EOF.Error(), LastReceiveError: lastRecvError, + LastReceiveErrorMessage: lastRecvErrorMsg, ImportedServices: map[string]struct{}{ api.String(): {}, }, @@ -654,35 +654,35 @@ func TestStreamResources_Server_ServiceUpdates(t *testing.T) { expectReplEvents(t, client, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { - require.Equal(t, pbpeerstream.TypeURLRoots, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, msg.GetResponse().ResourceURL) // Roots tested in TestStreamResources_Server_CARootUpdates }, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { // no mongo instances exist - require.Equal(t, pbpeerstream.TypeURLService, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLExportedService, msg.GetResponse().ResourceURL) require.Equal(t, mongoSN, msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_DELETE, msg.GetResponse().Operation) require.Nil(t, msg.GetResponse().Resource) }, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { // proxies can't export because no mesh gateway exists yet - require.Equal(t, pbpeerstream.TypeURLService, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLExportedService, msg.GetResponse().ResourceURL) require.Equal(t, mongoProxySN, msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_DELETE, msg.GetResponse().Operation) require.Nil(t, msg.GetResponse().Resource) }, func(t *testing.T, 
msg *pbpeerstream.ReplicationMessage) { - require.Equal(t, pbpeerstream.TypeURLService, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLExportedService, msg.GetResponse().ResourceURL) require.Equal(t, mysqlSN, msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_UPSERT, msg.GetResponse().Operation) - var nodes pbservice.IndexedCheckServiceNodes - require.NoError(t, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + var nodes pbpeerstream.ExportedService + require.NoError(t, msg.GetResponse().Resource.UnmarshalTo(&nodes)) require.Len(t, nodes.Nodes, 1) }, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { // proxies can't export because no mesh gateway exists yet - require.Equal(t, pbpeerstream.TypeURLService, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLExportedService, msg.GetResponse().ResourceURL) require.Equal(t, mysqlProxySN, msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_DELETE, msg.GetResponse().Operation) require.Nil(t, msg.GetResponse().Resource) @@ -704,12 +704,12 @@ func TestStreamResources_Server_ServiceUpdates(t *testing.T) { expectReplEvents(t, client, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { - require.Equal(t, pbpeerstream.TypeURLService, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLExportedService, msg.GetResponse().ResourceURL) require.Equal(t, mongoProxySN, msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_UPSERT, msg.GetResponse().Operation) - var nodes pbservice.IndexedCheckServiceNodes - require.NoError(t, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + var nodes pbpeerstream.ExportedService + require.NoError(t, msg.GetResponse().Resource.UnmarshalTo(&nodes)) require.Len(t, nodes.Nodes, 1) pm := nodes.Nodes[0].Service.Connect.PeerMeta @@ -721,12 +721,12 @@ func TestStreamResources_Server_ServiceUpdates(t *testing.T) { require.Equal(t, spiffeIDs, pm.SpiffeID) }, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { - require.Equal(t, pbpeerstream.TypeURLService, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLExportedService, msg.GetResponse().ResourceURL) require.Equal(t, mysqlProxySN, msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_UPSERT, msg.GetResponse().Operation) - var nodes pbservice.IndexedCheckServiceNodes - require.NoError(t, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + var nodes pbpeerstream.ExportedService + require.NoError(t, msg.GetResponse().Resource.UnmarshalTo(&nodes)) require.Len(t, nodes.Nodes, 1) pm := nodes.Nodes[0].Service.Connect.PeerMeta @@ -758,8 +758,8 @@ func TestStreamResources_Server_ServiceUpdates(t *testing.T) { require.Equal(r, pbpeerstream.Operation_OPERATION_UPSERT, msg.GetResponse().Operation) require.Equal(r, mongo.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) - var nodes pbservice.IndexedCheckServiceNodes - require.NoError(r, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + var nodes pbpeerstream.ExportedService + require.NoError(t, msg.GetResponse().Resource.UnmarshalTo(&nodes)) require.Len(r, nodes.Nodes, 1) }) }) @@ -824,12 +824,12 @@ func TestStreamResources_Server_CARootUpdates(t *testing.T) { testutil.RunStep(t, "initial CA Roots replication", func(t *testing.T) { expectReplEvents(t, client, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { - require.Equal(t, pbpeerstream.TypeURLRoots, 
msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, msg.GetResponse().ResourceURL) require.Equal(t, "roots", msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_UPSERT, msg.GetResponse().Operation) var trustBundle pbpeering.PeeringTrustBundle - require.NoError(t, ptypes.UnmarshalAny(msg.GetResponse().Resource, &trustBundle)) + require.NoError(t, msg.GetResponse().Resource.UnmarshalTo(&trustBundle)) require.ElementsMatch(t, []string{rootA.RootCert}, trustBundle.RootPEMs) expect := connect.SpiffeIDSigningForCluster(clusterID).Host() @@ -853,12 +853,12 @@ func TestStreamResources_Server_CARootUpdates(t *testing.T) { expectReplEvents(t, client, func(t *testing.T, msg *pbpeerstream.ReplicationMessage) { - require.Equal(t, pbpeerstream.TypeURLRoots, msg.GetResponse().ResourceURL) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, msg.GetResponse().ResourceURL) require.Equal(t, "roots", msg.GetResponse().ResourceID) require.Equal(t, pbpeerstream.Operation_OPERATION_UPSERT, msg.GetResponse().Operation) var trustBundle pbpeering.PeeringTrustBundle - require.NoError(t, ptypes.UnmarshalAny(msg.GetResponse().Resource, &trustBundle)) + require.NoError(t, msg.GetResponse().Resource.UnmarshalTo(&trustBundle)) require.ElementsMatch(t, []string{rootB.RootCert, rootC.RootCert}, trustBundle.RootPEMs) expect := connect.SpiffeIDSigningForCluster(clusterID).Host() @@ -886,33 +886,57 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s } }() - // Issue a services subscription to server - init := &pbpeerstream.ReplicationMessage{ - Payload: &pbpeerstream.ReplicationMessage_Request_{ - Request: &pbpeerstream.ReplicationMessage_Request{ - PeerID: peerID, - ResourceURL: pbpeerstream.TypeURLService, + // Issue a services and roots subscription pair to server + for _, resourceURL := range []string{ + pbpeerstream.TypeURLExportedService, + pbpeerstream.TypeURLPeeringTrustBundle, + } { + init := &pbpeerstream.ReplicationMessage{ + Payload: &pbpeerstream.ReplicationMessage_Request_{ + Request: &pbpeerstream.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: resourceURL, + }, }, - }, + } + require.NoError(t, client.Send(init)) } - require.NoError(t, client.Send(init)) - // Receive a services subscription from server - receivedSub, err := client.Recv() + // Receive a services and roots subscription request pair from server + receivedSub1, err := client.Recv() + require.NoError(t, err) + receivedSub2, err := client.Recv() require.NoError(t, err) - expect := &pbpeerstream.ReplicationMessage{ - Payload: &pbpeerstream.ReplicationMessage_Request_{ - Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, - // The PeerID field is only set for the messages coming FROM - // the establishing side and are going to be empty from the - // other side. - PeerID: "", + expect := []*pbpeerstream.ReplicationMessage{ + { + Payload: &pbpeerstream.ReplicationMessage_Request_{ + Request: &pbpeerstream.ReplicationMessage_Request{ + ResourceURL: pbpeerstream.TypeURLExportedService, + // The PeerID field is only set for the messages coming FROM + // the establishing side and are going to be empty from the + // other side. 
+ PeerID: "", + }, + }, + }, + { + Payload: &pbpeerstream.ReplicationMessage_Request_{ + Request: &pbpeerstream.ReplicationMessage_Request{ + ResourceURL: pbpeerstream.TypeURLPeeringTrustBundle, + // The PeerID field is only set for the messages coming FROM + // the establishing side and are going to be empty from the + // other side. + PeerID: "", + }, }, }, } - prototest.AssertDeepEqual(t, expect, receivedSub) + got := []*pbpeerstream.ReplicationMessage{ + receivedSub1, + receivedSub2, + } + prototest.AssertElementsMatch[*pbpeerstream.ReplicationMessage](t, expect, got) return client } @@ -1017,16 +1041,16 @@ func Test_processResponse_Validation(t *testing.T) { { name: "valid upsert", in: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResourceID: "api", Nonce: "1", Operation: pbpeerstream.Operation_OPERATION_UPSERT, - Resource: makeAnyPB(t, &pbservice.IndexedCheckServiceNodes{}), + Resource: makeAnyPB(t, &pbpeerstream.ExportedService{}), }, expect: &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "1", }, }, @@ -1036,7 +1060,7 @@ func Test_processResponse_Validation(t *testing.T) { { name: "valid delete", in: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResourceID: "api", Nonce: "1", Operation: pbpeerstream.Operation_OPERATION_DELETE, @@ -1044,7 +1068,7 @@ func Test_processResponse_Validation(t *testing.T) { expect: &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "1", }, }, @@ -1075,14 +1099,14 @@ func Test_processResponse_Validation(t *testing.T) { { name: "unknown operation", in: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, Nonce: "1", Operation: pbpeerstream.Operation_OPERATION_UNSPECIFIED, }, expect: &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "1", Error: &pbstatus.Status{ Code: int32(code.Code_INVALID_ARGUMENT), @@ -1096,14 +1120,14 @@ func Test_processResponse_Validation(t *testing.T) { { name: "out of range operation", in: &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, Nonce: "1", Operation: pbpeerstream.Operation(100000), }, expect: &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Request_{ Request: &pbpeerstream.ReplicationMessage_Request{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResponseNonce: "1", Error: &pbstatus.Status{ Code: int32(code.Code_INVALID_ARGUMENT), @@ -1163,8 +1187,8 @@ func writeInitialRootsAndCA(t *testing.T, store *state.Store) (string, *structs. 
return clusterID, rootA } -func makeAnyPB(t *testing.T, pb proto.Message) *any.Any { - any, err := ptypes.MarshalAny(pb) +func makeAnyPB(t *testing.T, pb proto.Message) *anypb.Any { + any, err := anypb.New(pb) require.NoError(t, err) return any } @@ -1255,7 +1279,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { type testCase struct { name string seed []*structs.RegisterRequest - input *pbservice.IndexedCheckServiceNodes + input *pbpeerstream.ExportedService expect map[string]structs.CheckServiceNodes expectedImportedServicesCount int } @@ -1296,7 +1320,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { } in := &pbpeerstream.ReplicationMessage_Response{ - ResourceURL: pbpeerstream.TypeURLService, + ResourceURL: pbpeerstream.TypeURLExportedService, ResourceID: apiSN.String(), Nonce: "1", Operation: op, @@ -1322,7 +1346,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { tt := []testCase{ { name: "upsert two service instances to the same node", - input: &pbservice.IndexedCheckServiceNodes{ + input: &pbpeerstream.ExportedService{ Nodes: []*pbservice.CheckServiceNode{ { Node: &pbservice.Node{ @@ -1454,7 +1478,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { }, { name: "upsert two service instances to different nodes", - input: &pbservice.IndexedCheckServiceNodes{ + input: &pbpeerstream.ExportedService{ Nodes: []*pbservice.CheckServiceNode{ { Node: &pbservice.Node{ @@ -1636,7 +1660,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { }, }, }, - input: &pbservice.IndexedCheckServiceNodes{}, + input: &pbpeerstream.ExportedService{}, expect: map[string]structs.CheckServiceNodes{ "api": {}, }, @@ -1695,7 +1719,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { }, }, // Nil input is for the "api" service. - input: &pbservice.IndexedCheckServiceNodes{}, + input: &pbpeerstream.ExportedService{}, expect: map[string]structs.CheckServiceNodes{ "api": {}, // Existing redis service was not affected by deletion. @@ -1761,7 +1785,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { }, }, }, - input: &pbservice.IndexedCheckServiceNodes{ + input: &pbpeerstream.ExportedService{ Nodes: []*pbservice.CheckServiceNode{ { Node: &pbservice.Node{ @@ -1856,7 +1880,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { }, }, }, - input: &pbservice.IndexedCheckServiceNodes{ + input: &pbpeerstream.ExportedService{ Nodes: []*pbservice.CheckServiceNode{ { Node: &pbservice.Node{ @@ -1991,7 +2015,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) { }, }, }, - input: &pbservice.IndexedCheckServiceNodes{ + input: &pbpeerstream.ExportedService{ Nodes: []*pbservice.CheckServiceNode{ { Node: &pbservice.Node{ diff --git a/agent/grpc-external/services/peerstream/subscription_blocking.go b/agent/grpc-external/services/peerstream/subscription_blocking.go index c2720dcdb..d11e03d55 100644 --- a/agent/grpc-external/services/peerstream/subscription_blocking.go +++ b/agent/grpc-external/services/peerstream/subscription_blocking.go @@ -19,6 +19,13 @@ import ( // streaming machinery instead to be cheaper. func (m *subscriptionManager) notifyExportedServicesForPeerID(ctx context.Context, state *subscriptionState, peerID string) { + // Wait until this is subscribed-to. 
+ select { + case <-m.serviceSubReady: + case <-ctx.Done(): + return + } + // syncSubscriptionsAndBlock ensures that the subscriptions to the subscription backend // match the list of services exported to the peer. m.syncViaBlockingQuery(ctx, "exported-services", func(ctx context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) { @@ -34,6 +41,13 @@ func (m *subscriptionManager) notifyExportedServicesForPeerID(ctx context.Contex // TODO: add a new streaming subscription type to list-by-kind-and-partition since we're getting evictions func (m *subscriptionManager) notifyMeshGatewaysForPartition(ctx context.Context, state *subscriptionState, partition string) { + // Wait until this is subscribed-to. + select { + case <-m.serviceSubReady: + case <-ctx.Done(): + return + } + m.syncViaBlockingQuery(ctx, "mesh-gateways", func(ctx context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) { // Fetch our current list of all mesh gateways. entMeta := structs.DefaultEnterpriseMetaInPartition(partition) diff --git a/agent/grpc-external/services/peerstream/subscription_manager.go b/agent/grpc-external/services/peerstream/subscription_manager.go index 33726a216..0c69b0338 100644 --- a/agent/grpc-external/services/peerstream/subscription_manager.go +++ b/agent/grpc-external/services/peerstream/subscription_manager.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbpeerstream" "github.com/hashicorp/consul/proto/pbservice" ) @@ -33,12 +34,14 @@ type SubscriptionBackend interface { // subscriptionManager handlers requests to subscribe to events from an events publisher. type subscriptionManager struct { - logger hclog.Logger - config Config - trustDomain string - viewStore MaterializedViewStore - backend SubscriptionBackend - getStore func() StateStore + logger hclog.Logger + config Config + trustDomain string + viewStore MaterializedViewStore + backend SubscriptionBackend + getStore func() StateStore + serviceSubReady <-chan struct{} + trustBundlesSubReady <-chan struct{} } // TODO(peering): Maybe centralize so that there is a single manager per datacenter, rather than per peering. @@ -49,18 +52,21 @@ func newSubscriptionManager( trustDomain string, backend SubscriptionBackend, getStore func() StateStore, + remoteSubTracker *resourceSubscriptionTracker, ) *subscriptionManager { logger = logger.Named("subscriptions") store := submatview.NewStore(logger.Named("viewstore")) go store.Run(ctx) return &subscriptionManager{ - logger: logger, - config: config, - trustDomain: trustDomain, - viewStore: store, - backend: backend, - getStore: getStore, + logger: logger, + config: config, + trustDomain: trustDomain, + viewStore: store, + backend: backend, + getStore: getStore, + serviceSubReady: remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLExportedService), + trustBundlesSubReady: remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLPeeringTrustBundle), } } @@ -297,6 +303,13 @@ func (m *subscriptionManager) notifyRootCAUpdatesForPartition( updateCh chan<- cache.UpdateEvent, partition string, ) { + // Wait until this is subscribed-to. 
+ select { + case <-m.trustBundlesSubReady: + case <-ctx.Done(): + return + } + var idx uint64 // TODO(peering): retry logic; fail past a threshold for { diff --git a/agent/grpc-external/services/peerstream/subscription_manager_test.go b/agent/grpc-external/services/peerstream/subscription_manager_test.go index cd12b2c22..1a5269817 100644 --- a/agent/grpc-external/services/peerstream/subscription_manager_test.go +++ b/agent/grpc-external/services/peerstream/subscription_manager_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbpeerstream" "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/sdk/testutil" @@ -32,12 +33,16 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) { _, id := backend.ensurePeering(t, "my-peering") partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty() + // Only configure a tracker for catalog events. + tracker := newResourceSubscriptionTracker() + tracker.Subscribe(pbpeerstream.TypeURLExportedService) + mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{ Datacenter: "dc1", ConnectEnabled: true, }, connect.TestTrustDomain, backend, func() StateStore { return backend.store - }) + }, tracker) subCh := mgr.subscribe(ctx, id, "my-peering", partition) var ( @@ -442,12 +447,16 @@ func TestSubscriptionManager_InitialSnapshot(t *testing.T) { _, id := backend.ensurePeering(t, "my-peering") partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty() + // Only configure a tracker for catalog events. + tracker := newResourceSubscriptionTracker() + tracker.Subscribe(pbpeerstream.TypeURLExportedService) + mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{ Datacenter: "dc1", ConnectEnabled: true, }, connect.TestTrustDomain, backend, func() StateStore { return backend.store - }) + }, tracker) subCh := mgr.subscribe(ctx, id, "my-peering", partition) // Register two services that are not yet exported @@ -571,21 +580,21 @@ func TestSubscriptionManager_CARoots(t *testing.T) { _, id := backend.ensurePeering(t, "my-peering") partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty() + // Only configure a tracker for CA roots events. 
+ tracker := newResourceSubscriptionTracker() + tracker.Subscribe(pbpeerstream.TypeURLPeeringTrustBundle) + mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{ Datacenter: "dc1", ConnectEnabled: true, }, connect.TestTrustDomain, backend, func() StateStore { return backend.store - }) + }, tracker) subCh := mgr.subscribe(ctx, id, "my-peering", partition) testutil.RunStep(t, "initial events contain trust bundle", func(t *testing.T) { // events are ordered so we can expect a deterministic list expectEvents(t, subCh, - func(t *testing.T, got cache.UpdateEvent) { - // mesh-gateway assertions are done in other tests - require.Equal(t, subMeshGateway+partition, got.CorrelationID) - }, func(t *testing.T, got cache.UpdateEvent) { require.Equal(t, subCARoot, got.CorrelationID) roots, ok := got.Result.(*pbpeering.PeeringTrustBundle) diff --git a/agent/grpc-external/services/peerstream/testing.go b/agent/grpc-external/services/peerstream/testing.go index 939c38dfa..1f85b2b78 100644 --- a/agent/grpc-external/services/peerstream/testing.go +++ b/agent/grpc-external/services/peerstream/testing.go @@ -2,6 +2,7 @@ package peerstream import ( "context" + "fmt" "io" "sync" "time" @@ -24,14 +25,7 @@ func (c *MockClient) Send(r *pbpeerstream.ReplicationMessage) error { } func (c *MockClient) Recv() (*pbpeerstream.ReplicationMessage, error) { - select { - case err := <-c.ErrCh: - return nil, err - case r := <-c.ReplicationStream.sendCh: - return r, nil - case <-time.After(10 * time.Millisecond): - return nil, io.EOF - } + return c.RecvWithTimeout(10 * time.Millisecond) } func (c *MockClient) RecvWithTimeout(dur time.Duration) (*pbpeerstream.ReplicationMessage, error) { @@ -61,7 +55,6 @@ type MockStream struct { recvCh chan *pbpeerstream.ReplicationMessage ctx context.Context - mu sync.Mutex } var _ pbpeerstream.PeerStreamService_StreamResourcesServer = (*MockStream)(nil) @@ -117,12 +110,37 @@ func (s *MockStream) SendHeader(metadata.MD) error { // SetTrailer implements grpc.ServerStream func (s *MockStream) SetTrailer(metadata.MD) {} +// incrementalTime is an artificial clock used during testing. For those +// scenarios you would pass around the method pointer for `Now` in places where +// you would be using `time.Now`. type incrementalTime struct { base time.Time next uint64 + mu sync.Mutex } +// Now advances the internal clock by 1 second and returns that value. func (t *incrementalTime) Now() time.Time { + t.mu.Lock() + defer t.mu.Unlock() t.next++ - return t.base.Add(time.Duration(t.next) * time.Second) + + dur := time.Duration(t.next) * time.Second + + return t.base.Add(dur) +} + +// FutureNow will return a given future value of the Now() function. +// The numerical argument indicates which future Now value you wanted. The +// value must be > 0. +func (t *incrementalTime) FutureNow(n int) time.Time { + if n < 1 { + panic(fmt.Sprintf("argument must be > 1 but was %d", n)) + } + t.mu.Lock() + defer t.mu.Unlock() + + dur := time.Duration(t.next+uint64(n)) * time.Second + + return t.base.Add(dur) } diff --git a/proto/pbpeerstream/convert.go b/proto/pbpeerstream/convert.go new file mode 100644 index 000000000..b0df6c42a --- /dev/null +++ b/proto/pbpeerstream/convert.go @@ -0,0 +1,25 @@ +package pbpeerstream + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/structs" + pbservice "github.com/hashicorp/consul/proto/pbservice" +) + +// CheckServiceNodesToStruct converts the contained CheckServiceNodes to their structs equivalent. 
+func (s *ExportedService) CheckServiceNodesToStruct() ([]structs.CheckServiceNode, error) { + if s == nil { + return nil, nil + } + + resp := make([]structs.CheckServiceNode, 0, len(s.Nodes)) + for _, pb := range s.Nodes { + instance, err := pbservice.CheckServiceNodeToStructs(pb) + if err != nil { + return resp, fmt.Errorf("failed to convert instance: %w", err) + } + resp = append(resp, *instance) + } + return resp, nil +} diff --git a/proto/pbpeerstream/peerstream.pb.binary.go b/proto/pbpeerstream/peerstream.pb.binary.go index 39dbdb814..c5d928949 100644 --- a/proto/pbpeerstream/peerstream.pb.binary.go +++ b/proto/pbpeerstream/peerstream.pb.binary.go @@ -56,3 +56,13 @@ func (msg *LeaderAddress) MarshalBinary() ([]byte, error) { func (msg *LeaderAddress) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ExportedService) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ExportedService) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbpeerstream/peerstream.pb.go b/proto/pbpeerstream/peerstream.pb.go index e9da9cc56..8b71b4e8c 100644 --- a/proto/pbpeerstream/peerstream.pb.go +++ b/proto/pbpeerstream/peerstream.pb.go @@ -7,6 +7,7 @@ package pbpeerstream import ( + pbservice "github.com/hashicorp/consul/proto/pbservice" pbstatus "github.com/hashicorp/consul/proto/pbstatus" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -220,6 +221,54 @@ func (x *LeaderAddress) GetAddress() string { return "" } +// ExportedService is one of the types of data returned via peer stream replication. +type ExportedService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nodes []*pbservice.CheckServiceNode `protobuf:"bytes,1,rep,name=Nodes,proto3" json:"Nodes,omitempty"` +} + +func (x *ExportedService) Reset() { + *x = ExportedService{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportedService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportedService) ProtoMessage() {} + +func (x *ExportedService) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportedService.ProtoReflect.Descriptor instead. +func (*ExportedService) Descriptor() ([]byte, []int) { + return file_proto_pbpeerstream_peerstream_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportedService) GetNodes() []*pbservice.CheckServiceNode { + if x != nil { + return x.Nodes + } + return nil +} + // A Request requests to subscribe to a resource of a given type. 
type ReplicationMessage_Request struct { state protoimpl.MessageState @@ -244,7 +293,7 @@ type ReplicationMessage_Request struct { func (x *ReplicationMessage_Request) Reset() { *x = ReplicationMessage_Request{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[2] + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -257,7 +306,7 @@ func (x *ReplicationMessage_Request) String() string { func (*ReplicationMessage_Request) ProtoMessage() {} func (x *ReplicationMessage_Request) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[2] + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -323,7 +372,7 @@ type ReplicationMessage_Response struct { func (x *ReplicationMessage_Response) Reset() { *x = ReplicationMessage_Response{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[3] + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -336,7 +385,7 @@ func (x *ReplicationMessage_Response) String() string { func (*ReplicationMessage_Response) ProtoMessage() {} func (x *ReplicationMessage_Response) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[3] + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -398,7 +447,7 @@ type ReplicationMessage_Terminated struct { func (x *ReplicationMessage_Terminated) Reset() { *x = ReplicationMessage_Terminated{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[4] + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -411,7 +460,7 @@ func (x *ReplicationMessage_Terminated) String() string { func (*ReplicationMessage_Terminated) ProtoMessage() {} func (x *ReplicationMessage_Terminated) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[4] + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -436,92 +485,99 @@ var file_proto_pbpeerstream_peerstream_proto_rawDesc = []byte{ 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x05, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 
0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, - 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5f, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, - 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0a, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, - 0x65, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, - 0x1a, 0xa9, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, - 0x65, 0x72, 0x49, 0x44, 0x12, 0x24, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x3e, 0x0a, 0x05, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xe3, 0x01, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, - 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, - 0x44, 0x12, 0x30, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 
0x4d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xe5, 0x05, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x5f, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, - 0x42, 0x09, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x4c, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x52, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, - 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x53, 0x45, - 0x52, 0x54, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x32, 0x9f, 0x01, 0x0a, 0x11, 0x50, - 0x65, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x89, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 
0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, + 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x01, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, + 0x12, 0x24, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x3e, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xe3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x1e, 0x0a, + 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x4d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, + 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x0c, + 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, 0x07, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x4c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x22, 0x5c, 0x0a, 0x0f, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x38, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x2a, 0x52, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, + 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, + 0x54, 0x45, 0x10, 0x02, 0x32, 0x9f, 0x01, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x89, 0x01, 0x0a, 0x0f, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x9f, 0x02, 0x0a, - 0x28, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xa2, 0x02, 0x04, 0x48, - 0x43, 0x49, 0x50, 0xaa, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xca, 0x02, 0x24, 0x48, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0xe2, 0x02, 0x30, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, - 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x27, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x3a, 0x3a, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x38, 0x2e, 
0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x9f, 0x02, 0x0a, 0x28, 0x63, 0x6f, 0x6d, 0x2e, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x42, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, 0x24, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0xca, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xe2, 0x02, 0x30, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x27, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x50, 0x65, + 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -537,31 +593,34 @@ func file_proto_pbpeerstream_peerstream_proto_rawDescGZIP() []byte { } var file_proto_pbpeerstream_peerstream_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_proto_pbpeerstream_peerstream_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_proto_pbpeerstream_peerstream_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_proto_pbpeerstream_peerstream_proto_goTypes = []interface{}{ (Operation)(0), // 0: hashicorp.consul.internal.peerstream.Operation (*ReplicationMessage)(nil), // 1: hashicorp.consul.internal.peerstream.ReplicationMessage (*LeaderAddress)(nil), // 2: hashicorp.consul.internal.peerstream.LeaderAddress - (*ReplicationMessage_Request)(nil), // 3: hashicorp.consul.internal.peerstream.ReplicationMessage.Request - (*ReplicationMessage_Response)(nil), // 4: hashicorp.consul.internal.peerstream.ReplicationMessage.Response - (*ReplicationMessage_Terminated)(nil), // 5: hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated - (*pbstatus.Status)(nil), // 6: hashicorp.consul.internal.status.Status - (*anypb.Any)(nil), // 7: google.protobuf.Any + (*ExportedService)(nil), // 3: hashicorp.consul.internal.peerstream.ExportedService + (*ReplicationMessage_Request)(nil), // 4: hashicorp.consul.internal.peerstream.ReplicationMessage.Request + (*ReplicationMessage_Response)(nil), // 5: 
hashicorp.consul.internal.peerstream.ReplicationMessage.Response + (*ReplicationMessage_Terminated)(nil), // 6: hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated + (*pbservice.CheckServiceNode)(nil), // 7: hashicorp.consul.internal.service.CheckServiceNode + (*pbstatus.Status)(nil), // 8: hashicorp.consul.internal.status.Status + (*anypb.Any)(nil), // 9: google.protobuf.Any } var file_proto_pbpeerstream_peerstream_proto_depIdxs = []int32{ - 3, // 0: hashicorp.consul.internal.peerstream.ReplicationMessage.request:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Request - 4, // 1: hashicorp.consul.internal.peerstream.ReplicationMessage.response:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Response - 5, // 2: hashicorp.consul.internal.peerstream.ReplicationMessage.terminated:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated - 6, // 3: hashicorp.consul.internal.peerstream.ReplicationMessage.Request.Error:type_name -> hashicorp.consul.internal.status.Status - 7, // 4: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any - 0, // 5: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.operation:type_name -> hashicorp.consul.internal.peerstream.Operation - 1, // 6: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:input_type -> hashicorp.consul.internal.peerstream.ReplicationMessage - 1, // 7: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:output_type -> hashicorp.consul.internal.peerstream.ReplicationMessage - 7, // [7:8] is the sub-list for method output_type - 6, // [6:7] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 4, // 0: hashicorp.consul.internal.peerstream.ReplicationMessage.request:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Request + 5, // 1: hashicorp.consul.internal.peerstream.ReplicationMessage.response:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Response + 6, // 2: hashicorp.consul.internal.peerstream.ReplicationMessage.terminated:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated + 7, // 3: hashicorp.consul.internal.peerstream.ExportedService.Nodes:type_name -> hashicorp.consul.internal.service.CheckServiceNode + 8, // 4: hashicorp.consul.internal.peerstream.ReplicationMessage.Request.Error:type_name -> hashicorp.consul.internal.status.Status + 9, // 5: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any + 0, // 6: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.operation:type_name -> hashicorp.consul.internal.peerstream.Operation + 1, // 7: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:input_type -> hashicorp.consul.internal.peerstream.ReplicationMessage + 1, // 8: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:output_type -> hashicorp.consul.internal.peerstream.ReplicationMessage + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_proto_pbpeerstream_peerstream_proto_init() } @@ -595,7 +654,7 @@ func 
file_proto_pbpeerstream_peerstream_proto_init() { } } file_proto_pbpeerstream_peerstream_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationMessage_Request); i { + switch v := v.(*ExportedService); i { case 0: return &v.state case 1: @@ -607,7 +666,7 @@ func file_proto_pbpeerstream_peerstream_proto_init() { } } file_proto_pbpeerstream_peerstream_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationMessage_Response); i { + switch v := v.(*ReplicationMessage_Request); i { case 0: return &v.state case 1: @@ -619,6 +678,18 @@ func file_proto_pbpeerstream_peerstream_proto_init() { } } file_proto_pbpeerstream_peerstream_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeerstream_peerstream_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReplicationMessage_Terminated); i { case 0: return &v.state @@ -642,7 +713,7 @@ func file_proto_pbpeerstream_peerstream_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_pbpeerstream_peerstream_proto_rawDesc, NumEnums: 1, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/pbpeerstream/peerstream.proto b/proto/pbpeerstream/peerstream.proto index ee19a2df7..54be6e4b7 100644 --- a/proto/pbpeerstream/peerstream.proto +++ b/proto/pbpeerstream/peerstream.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package hashicorp.consul.internal.peerstream; import "google/protobuf/any.proto"; +import "proto/pbservice/node.proto"; // TODO(peering): Handle this some other way import "proto/pbstatus/status.proto"; @@ -89,3 +90,8 @@ message LeaderAddress { // address is an ip:port best effort hint at what could be the cluster leader's address string address = 1; } + +// ExportedService is one of the types of data returned via peer stream replication. +message ExportedService { + repeated hashicorp.consul.internal.service.CheckServiceNode Nodes = 1; +} diff --git a/proto/pbpeerstream/types.go b/proto/pbpeerstream/types.go index 52f32487d..df300cccd 100644 --- a/proto/pbpeerstream/types.go +++ b/proto/pbpeerstream/types.go @@ -1,10 +1,16 @@ package pbpeerstream const ( - TypeURLService = "type.googleapis.com/consul.api.Service" - TypeURLRoots = "type.googleapis.com/consul.api.CARoots" + apiTypePrefix = "type.googleapis.com/" + + TypeURLExportedService = apiTypePrefix + "hashicorp.consul.internal.peerstream.ExportedService" + TypeURLPeeringTrustBundle = apiTypePrefix + "hashicorp.consul.internal.peering.PeeringTrustBundle" ) func KnownTypeURL(s string) bool { - return s == TypeURLService || s == TypeURLRoots + switch s { + case TypeURLExportedService, TypeURLPeeringTrustBundle: + return true + } + return false } diff --git a/proto/pbservice/convert.go b/proto/pbservice/convert.go index 02895adf9..d5233dd99 100644 --- a/proto/pbservice/convert.go +++ b/proto/pbservice/convert.go @@ -1,8 +1,6 @@ package pbservice import ( - "fmt" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbcommon" "github.com/hashicorp/consul/types" @@ -44,23 +42,6 @@ func NewMapHeadersFromStructs(t map[string][]string) map[string]*HeaderValue { return s } -// CheckServiceNodesToStruct converts the contained CheckServiceNodes to their structs equivalent. 
-func (s *IndexedCheckServiceNodes) CheckServiceNodesToStruct() ([]structs.CheckServiceNode, error) { - if s == nil { - return nil, nil - } - - resp := make([]structs.CheckServiceNode, 0, len(s.Nodes)) - for _, pb := range s.Nodes { - instance, err := CheckServiceNodeToStructs(pb) - if err != nil { - return resp, fmt.Errorf("failed to convert instance: %w", err) - } - resp = append(resp, *instance) - } - return resp, nil -} - // TODO: use mog once it supports pointers and slices func CheckServiceNodeToStructs(s *CheckServiceNode) (*structs.CheckServiceNode, error) { if s == nil { diff --git a/proto/prototest/testing.go b/proto/prototest/testing.go index c196d77b3..275d8502b 100644 --- a/proto/prototest/testing.go +++ b/proto/prototest/testing.go @@ -16,3 +16,60 @@ func AssertDeepEqual(t testing.TB, x, y interface{}, opts ...cmp.Option) { t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) } } + +// AssertElementsMatch asserts that the specified listX(array, slice...) is +// equal to specified listY(array, slice...) ignoring the order of the +// elements. If there are duplicate elements, the number of appearances of each +// of them in both lists should match. +// +// prototest.AssertElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func AssertElementsMatch[V any]( + t testing.TB, listX, listY []V, opts ...cmp.Option, +) { + t.Helper() + + if len(listX) == 0 && len(listY) == 0 { + return + } + + opts = append(opts, protocmp.Transform()) + + // dump into a map keyed by sliceID + mapX := make(map[int]V) + for i, val := range listX { + mapX[i] = val + } + + mapY := make(map[int]V) + for i, val := range listY { + mapY[i] = val + } + + var outX, outY []V + for i, itemX := range mapX { + for j, itemY := range mapY { + if diff := cmp.Diff(itemX, itemY, opts...); diff == "" { + outX = append(outX, itemX) + outY = append(outY, itemY) + delete(mapX, i) + delete(mapY, j) + } + } + } + + if len(outX) == len(outY) && len(outX) == len(listX) { + return // matches + } + + // dump remainder into the slice so we can generate a useful error + for _, itemX := range mapX { + outX = append(outX, itemX) + } + for _, itemY := range mapY { + outY = append(outY, itemY) + } + + if diff := cmp.Diff(outX, outY, opts...); diff != "" { + t.Fatalf("assertion failed: slices do not have matching elements\n--- expected\n+++ actual\n%v", diff) + } +} From 40c0519d4614a74b1a6dc8ab596179892cfd9352 Mon Sep 17 00:00:00 2001 From: Krastin Krastev Date: Mon, 18 Jul 2022 13:44:50 +0300 Subject: [PATCH 026/107] docs: clean-up expanded service def --- .../connect/registration/sidecar-service.mdx | 46 ++++++++++--------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/website/content/docs/connect/registration/sidecar-service.mdx b/website/content/docs/connect/registration/sidecar-service.mdx index a795d61e1..7021265bc 100644 --- a/website/content/docs/connect/registration/sidecar-service.mdx +++ b/website/content/docs/connect/registration/sidecar-service.mdx @@ -48,32 +48,34 @@ definitions: ```json { - "service": { - "name": "web", - "port": 8080 - } -} -{ - "name": "web-sidecar-proxy", - "port": 20000, - "kind": "connect-proxy", - "checks": [ + "services": [ { - "Name": "Connect Sidecar Listening", - "TCP": "127.0.0.1:20000", - "Interval": "10s" + "name": "web", + "port": 8080 }, { - "name": "Connect Sidecar Aliasing web", - "alias_service": "web" + "name": "web-sidecar-proxy", + "port": 20000, + "kind": "connect-proxy", + "checks": [ + { + "Name": "Connect Sidecar Listening", + "TCP": 
"127.0.0.1:20000", + "Interval": "10s" + }, + { + "name": "Connect Sidecar Aliasing web", + "alias_service": "web" + } + ], + "proxy": { + "destination_service_name": "web", + "destination_service_id": "web", + "local_service_address": "127.0.0.1", + "local_service_port": 8080 + } } - ], - "proxy": { - "destination_service_name": "web", - "destination_service_id": "web", - "local_service_address": "127.0.0.1", - "local_service_port": 8080, - } + ] } ``` From b3488abc1154c9a36dc9164536137053bd3e8469 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Mon, 18 Jul 2022 14:22:17 +0200 Subject: [PATCH 027/107] ui: wan federation message dc-dropdown (#13753) * Only display dc dropdown when more than one dc is available * Add wan federation message to dc dropdown * Add test for conditionally displaying dc dropdown * Move single datacenter indicator into datacenter selector * Add `DATACENTERS` seperator dc dropdown * "fix" unnecessary margin-top in dc dropdown --- .../consul/datacenter/selector/index.hbs | 97 +++++++++++-------- .../app/components/hashicorp-consul/index.hbs | 12 +-- .../components/hashicorp-consul/index.scss | 11 +++ .../components/main-nav-vertical/layout.scss | 2 +- .../consul/datacenter/selector-test.js | 49 ++++++++++ .../components/hashicorp-consul-test.js | 3 +- 6 files changed, 123 insertions(+), 51 deletions(-) create mode 100644 ui/packages/consul-ui/tests/integration/components/consul/datacenter/selector-test.js diff --git a/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs b/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs index 97194bf29..f54b4606b 100644 --- a/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs @@ -2,49 +2,60 @@ class="dcs" data-test-datacenter-menu > - - - {{@dc.Name}} - - - - - {{#each menu.items as |item|}} - - + + {{@dc.Name}} + + + +

+ Datacenters shown in this dropdown are available through WAN Federation. +

+ + + DATACENTERS + + {{#each menu.items as |item|}} + - {{item.Name}} - {{#if item.Primary}} - Primary - {{/if}} - {{#if item.Local}} - Local - {{/if}} -
-
- {{/each}} -
-
-
+ + {{item.Name}} + {{#if item.Primary}} + Primary + {{/if}} + {{#if item.Local}} + Local + {{/if}} + + + {{/each}} + + + + {{else}} +
  • {{@dcs.firstObject.Name}}
  • + {{/if}} diff --git a/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs b/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs index 07296d4fe..4d7a040ff 100644 --- a/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs +++ b/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs @@ -87,12 +87,12 @@ <:main-nav>
      - + ul > [role='separator'] { margin-top: 0.7rem; padding-bottom: 0; } diff --git a/ui/packages/consul-ui/tests/integration/components/consul/datacenter/selector-test.js b/ui/packages/consul-ui/tests/integration/components/consul/datacenter/selector-test.js new file mode 100644 index 000000000..1a42372ce --- /dev/null +++ b/ui/packages/consul-ui/tests/integration/components/consul/datacenter/selector-test.js @@ -0,0 +1,49 @@ +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; +import { render } from '@ember/test-helpers'; + +module('Integration | Component | consul datacenter selector', function(hooks) { + setupRenderingTest(hooks); + + test('it does not display a dropdown when only one dc is available', async function(assert) { + const dcs = [ + { + Name: 'dc-1', + }, + ]; + this.set('dcs', dcs); + this.set('dc', dcs[0]); + + await render(hbs``); + + assert + .dom('[data-test-datacenter-disclosure-menu]') + .doesNotExist('datacenter dropdown is not displayed in nav'); + + assert + .dom('[data-test-datacenter-single]') + .hasText('dc-1', 'Datecenter name is displayed in nav'); + }); + + test('it does displays a dropdown when more than one dc is available', async function(assert) { + const dcs = [ + { + Name: 'dc-1', + }, + { + Name: 'dc-2', + }, + ]; + this.set('dcs', dcs); + this.set('dc', dcs[0]); + + await render(hbs``); + + assert + .dom('[data-test-datacenter-single]') + .doesNotExist('we are displaying more than just the name of the first dc'); + + assert.dom('[data-test-datacenter-disclosure-menu]').exists('datacenter dropdown is displayed'); + }); +}); diff --git a/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js b/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js index 2f611c552..58a75c94b 100644 --- a/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js +++ b/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js @@ -1,6 +1,7 @@ -import { module, skip } from 'qunit'; +import { module, skip, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import hbs from 'htmlbars-inline-precompile'; +import { render } from '@ember/test-helpers'; module('Integration | Component | hashicorp consul', function(hooks) { setupRenderingTest(hooks); From c474a369f6b75b24027b76a5744eb14d474dd6ad Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 18 Jul 2022 15:30:37 +0100 Subject: [PATCH 028/107] ui: Add a modal.opened property for inspecting whether the modal is open (#13723) * ui: Add a modal.opened property for inspecting whether the modal is open * merge isOpen setting into the exiting event handler * Revert to multiple listeners, plus comment to explain * Wrap close in an afterRender --- .../app/components/modal-dialog/README.mdx | 1 + .../app/components/modal-dialog/index.hbs | 3 +++ .../app/components/modal-dialog/index.js | 15 ++++++++++++--- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/ui/packages/consul-ui/app/components/modal-dialog/README.mdx b/ui/packages/consul-ui/app/components/modal-dialog/README.mdx index c249255c0..8b414da5d 100644 --- a/ui/packages/consul-ui/app/components/modal-dialog/README.mdx +++ b/ui/packages/consul-ui/app/components/modal-dialog/README.mdx @@ -70,5 +70,6 @@ Then all modals will be rendered into the ``. 
| --- | --- | --- | | `open` | `Function` | Opens the modal dialog | | `close` | `Function` | Closes the modal dialog | +| `opened` | `boolean` | Whether the modal is currently open or not | diff --git a/ui/packages/consul-ui/app/components/modal-dialog/index.hbs b/ui/packages/consul-ui/app/components/modal-dialog/index.hbs index 9ad3cd6f3..6cb2f944f 100644 --- a/ui/packages/consul-ui/app/components/modal-dialog/index.hbs +++ b/ui/packages/consul-ui/app/components/modal-dialog/index.hbs @@ -30,6 +30,7 @@ {{yield (hash open=(action "open") close=(action "close") + opened=this.isOpen aria=aria )}} @@ -39,6 +40,7 @@ {{yield (hash open=(action "open") close=(action "close") + opened=this.isOpen aria=aria )}} @@ -48,6 +50,7 @@ {{yield (hash open=(action "open") close=(action "close") + opened=this.isOpen aria=aria )}} diff --git a/ui/packages/consul-ui/app/components/modal-dialog/index.js b/ui/packages/consul-ui/app/components/modal-dialog/index.js index c7e1776fe..c9025fc7d 100644 --- a/ui/packages/consul-ui/app/components/modal-dialog/index.js +++ b/ui/packages/consul-ui/app/components/modal-dialog/index.js @@ -1,18 +1,27 @@ import Component from '@ember/component'; +import { set } from '@ember/object'; import Slotted from 'block-slots'; import A11yDialog from 'a11y-dialog'; +import { schedule } from '@ember/runloop'; export default Component.extend(Slotted, { tagName: '', onclose: function() {}, onopen: function() {}, + isOpen: false, actions: { connect: function($el) { this.dialog = new A11yDialog($el); - this.dialog.on('hide', () => this.onclose({ target: $el })); - this.dialog.on('show', () => this.onopen({ target: $el })); + this.dialog.on('hide', () => { + schedule('afterRender', _ => set(this, 'isOpen', false)); + this.onclose({ target: $el }) + }); + this.dialog.on('show', () => { + set(this, 'isOpen', true) + this.onopen({ target: $el }) + }); if (this.open) { - this.dialog.show(); + this.actions.open.apply(this, []); } }, disconnect: function($el) { From 539abdf60f410cca94ad45536259609453cf404c Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 18 Jul 2022 17:39:22 +0100 Subject: [PATCH 029/107] ui: Adds Peer initiation form (#13754) --- .../consul/peer/form/initiate/README.mdx | 26 +++++++++ .../peer/form/initiate/actions/index.hbs | 7 +++ .../peer/form/initiate/fieldsets/index.hbs | 53 +++++++++++++++++++ .../consul/peer/form/initiate/index.hbs | 41 ++++++++++++++ .../consul-ui/translations/common/en-us.yaml | 9 ++++ 5 files changed, 136 insertions(+) create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/initiate/README.mdx create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/initiate/actions/index.hbs create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/initiate/fieldsets/index.hbs create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/initiate/index.hbs diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/README.mdx b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/README.mdx new file mode 100644 index 000000000..014456936 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/README.mdx @@ -0,0 +1,26 @@ +# Consul::Peer::Form::Initiate + +```hbs preview-template + +{{#if source.data}} + + + + +{{/if}} + +``` diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/actions/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/actions/index.hbs 
new file mode 100644 index 000000000..555ddf3b0 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/actions/index.hbs @@ -0,0 +1,7 @@ + + Add peer + diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/fieldsets/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/fieldsets/index.hbs new file mode 100644 index 000000000..f0f2fd5eb --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/fieldsets/index.hbs @@ -0,0 +1,53 @@ +
      + {{#let + (hash + help='Enter a name to locally identify the new peer.' + Name=(array + (hash + test=(t 'common.validations.dns-hostname.test') + error=(t 'common.validations.dns-hostname.error' name="Name") + ) + ) + ) + + (hash + help='Enter the token received from the operator of the desired peer.' + PeeringToken=(array) + ) + + as |Name PeeringToken|}} +

      + Enter a token generated in the desired peer. +

      + + +
      + + + {{yield (hash + valid=(not (state-matches fsm.state 'error')) + )}} +
      + +
      + {{/let}} +
      diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/index.hbs new file mode 100644 index 000000000..a0b1294bf --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/initiate/index.hbs @@ -0,0 +1,41 @@ +
      + + +{{#let + (unique-id) +as |id|}} +
      + {{yield (hash + Fieldsets=(component "consul/peer/form/initiate/fieldsets" + item=@item + ) + Actions=(component "consul/peer/form/initiate/actions" + item=@item + id=id + ) + )}} +
      +{{/let}} +
      +
      +
      diff --git a/ui/packages/consul-ui/translations/common/en-us.yaml b/ui/packages/consul-ui/translations/common/en-us.yaml index d45545c3c..a096b0980 100644 --- a/ui/packages/consul-ui/translations/common/en-us.yaml +++ b/ui/packages/consul-ui/translations/common/en-us.yaml @@ -72,3 +72,12 @@ sort: status: asc: Unhealthy to Healthy desc: Healthy to Unhealthy +validations: + dns-hostname: + help: | + Must be a valid DNS hostname. Must contain 1-64 characters (numbers, letters, and hyphens), and must begin with a letter. + test: "^[a-zA-Z0-9]([a-zA-Z0-9-]'{0,62}'[a-zA-Z0-9])?$" + error: "{name} must be a valid DNS hostname." + immutable: + help: Once created, this cannot be changed. + From 987b6255fb60ac3005a60d8929964be817b807e0 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 18 Jul 2022 17:39:52 +0100 Subject: [PATCH 030/107] ui: Add peer token generation form (#13755) * ui: Add peer token generation form --- .../consul/peer/form/generate/README.mdx | 28 ++++++++++ .../peer/form/generate/actions/index.hbs | 10 ++++ .../consul/peer/form/generate/chart.xstate.js | 26 +++++++++ .../peer/form/generate/fieldsets/index.hbs | 40 +++++++++++++ .../consul/peer/form/generate/index.hbs | 56 +++++++++++++++++++ 5 files changed, 160 insertions(+) create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/generate/README.mdx create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/generate/actions/index.hbs create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/generate/chart.xstate.js create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/generate/fieldsets/index.hbs create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/README.mdx b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/README.mdx new file mode 100644 index 000000000..6b791f391 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/README.mdx @@ -0,0 +1,28 @@ +# Consul::Peer::Form::Generate + +```hbs preview-template + + +{{#if source.data}} + + + + +{{/if}} + +``` diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/actions/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/actions/index.hbs new file mode 100644 index 000000000..5c7513233 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/actions/index.hbs @@ -0,0 +1,10 @@ + + Generate token + diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/chart.xstate.js b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/chart.xstate.js new file mode 100644 index 000000000..1c3b43f9d --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/chart.xstate.js @@ -0,0 +1,26 @@ +export default { + id: 'consul-peer-generate-form', + initial: 'idle', + states: { + idle: { + on: { + LOAD: { + target: 'loading' + } + } + }, + loading: { + on: { + SUCCESS: { + target: 'success' + }, + ERROR: { + target: 'error' + } + } + }, + success: { + }, + error: {}, + }, +}; diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/fieldsets/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/fieldsets/index.hbs new file mode 100644 index 000000000..f6fd4fcbc --- /dev/null +++ 
b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/fieldsets/index.hbs @@ -0,0 +1,40 @@ +
      + + {{#let + (hash + help=(concat + (t 'common.validations.dns-hostname.help') + (t 'common.validations.immutable.help') + ) + Name=(array + (hash + test=(t 'common.validations.dns-hostname.test') + error=(t 'common.validations.dns-hostname.error' name="Name") + ) + ) + ) + as |Name|}} +
      + + {{yield (hash + valid=(not (state-matches fsm.state 'error')) + )}} +
      + + {{/let}} +
      +
      diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs new file mode 100644 index 000000000..204906324 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs @@ -0,0 +1,56 @@ +
      + + + {{#let + (unique-id) + as |id reset|}} +
      + + + {{yield (hash + Fieldsets=(component "consul/peer/form/generate/fieldsets" + item=@item + ) + Actions=(component "consul/peer/form/generate/actions" + item=@item + id=id + ) + )}} + + + + + + + + + +
      + {{/let}} + +
      +
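The initiate and generate forms above drive Consul's peering token exchange: one cluster generates a token, and the other establishes the peering with it. Below is a minimal Go sketch of that same exchange through the `api` package; it assumes the `Peerings()` client and request types exercised by `TestAPI_Peering_GenerateToken_Read_Establish_Delete` later in this series, and the exact signatures are approximations rather than something taken from these patches.

```go
// Sketch only: the generate/establish exchange behind the two peering forms above.
// The Peerings() client and the request/response field names are assumptions based
// on the api-package peering test referenced later in this series.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	ctx := context.Background()

	// Cluster that accepts the peering and generates the token.
	acceptor, err := api.NewClient(&api.Config{Address: "127.0.0.1:8500"})
	if err != nil {
		log.Fatal(err)
	}
	// Cluster that dials in using the token (what the initiate form submits).
	dialer, err := api.NewClient(&api.Config{Address: "127.0.0.1:9500"})
	if err != nil {
		log.Fatal(err)
	}

	// Step 1: generate a token on the accepting cluster, naming the peer that will dial in.
	gen, _, err := acceptor.Peerings().GenerateToken(ctx,
		api.PeeringGenerateTokenRequest{PeerName: "cluster-02"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: establish the peering from the dialing cluster with that token.
	_, _, err = dialer.Peerings().Establish(ctx,
		api.PeeringEstablishRequest{PeerName: "cluster-01", PeeringToken: gen.PeeringToken}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("peering established")
}
```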
      From 4ff097c4cff75cacf15c1f80ee3d0ef9ec33152a Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Mon, 18 Jul 2022 10:20:04 -0700 Subject: [PATCH 031/107] peering: track exported services (#13784) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/consul/leader_peering_test.go | 47 +++++++++++++----- .../services/peerstream/replication.go | 15 ++++-- .../services/peerstream/stream_resources.go | 2 +- .../services/peerstream/stream_test.go | 48 +++++++++++++++++++ .../services/peerstream/stream_tracker.go | 31 +++++++++++- agent/rpc/peering/service.go | 2 + 6 files changed, 126 insertions(+), 19 deletions(-) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index aa720bd6b..c21156b42 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -616,7 +616,7 @@ func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastId } // TODO(peering): once we move away from leader only request for PeeringList, move this test to consul/server_test maybe -func TestLeader_Peering_ImportedServicesCount(t *testing.T) { +func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -747,10 +747,10 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { /// finished adding services type testCase struct { - name string - description string - exportedService structs.ExportedServicesConfigEntry - expectedImportedServicesCount uint64 + name string + description string + exportedService structs.ExportedServicesConfigEntry + expectedImportedExportedServicesCount uint64 // same count for a server that imports the services form a server that exports them } testCases := []testCase{ @@ -770,7 +770,7 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { }, }, }, - expectedImportedServicesCount: 4, // 3 services from above + the "consul" service + expectedImportedExportedServicesCount: 4, // 3 services from above + the "consul" service }, { name: "no sync", @@ -778,7 +778,7 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { exportedService: structs.ExportedServicesConfigEntry{ Name: "default", }, - expectedImportedServicesCount: 0, // we want to see this decremented from 4 --> 0 + expectedImportedExportedServicesCount: 0, // we want to see this decremented from 4 --> 0 }, { name: "just a, b services", @@ -804,7 +804,7 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { }, }, }, - expectedImportedServicesCount: 2, + expectedImportedExportedServicesCount: 2, }, { name: "unexport b service", @@ -822,7 +822,7 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { }, }, }, - expectedImportedServicesCount: 1, + expectedImportedExportedServicesCount: 1, }, { name: "export c service", @@ -848,7 +848,7 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { }, }, }, - expectedImportedServicesCount: 2, + expectedImportedExportedServicesCount: 2, }, } @@ -866,11 +866,34 @@ func TestLeader_Peering_ImportedServicesCount(t *testing.T) { lastIdx++ require.NoError(t, s1.fsm.State().EnsureConfigEntry(lastIdx, &tc.exportedService)) + // Check that imported services count on S2 are what we expect retry.Run(t, func(r *retry.R) { - resp2, err := peeringClient2.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + // on Read + resp, err := peeringClient2.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s1"}) require.NoError(r, err) + 
require.NotNil(r, resp.Peering) + require.Equal(r, tc.expectedImportedExportedServicesCount, resp.Peering.ImportedServiceCount) + + // on List + resp2, err2 := peeringClient2.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + require.NoError(r, err2) require.NotEmpty(r, resp2.Peerings) - require.Equal(r, tc.expectedImportedServicesCount, resp2.Peerings[0].ImportedServiceCount) + require.Equal(r, tc.expectedImportedExportedServicesCount, resp2.Peerings[0].ImportedServiceCount) + }) + + // Check that exported services count on S1 are what we expect + retry.Run(t, func(r *retry.R) { + // on Read + resp, err := peeringClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s2"}) + require.NoError(r, err) + require.NotNil(r, resp.Peering) + require.Equal(r, tc.expectedImportedExportedServicesCount, resp.Peering.ExportedServiceCount) + + // on List + resp2, err2 := peeringClient.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + require.NoError(r, err2) + require.NotEmpty(r, resp2.Peerings) + require.Equal(r, tc.expectedImportedExportedServicesCount, resp2.Peerings[0].ExportedServiceCount) }) }) } diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go index c69d705d3..938605cac 100644 --- a/agent/grpc-external/services/peerstream/replication.go +++ b/agent/grpc-external/services/peerstream/replication.go @@ -36,10 +36,14 @@ import ( // If there are no instances in the event, we consider that to be a de-registration. func makeServiceResponse( logger hclog.Logger, + mst *MutableStatus, update cache.UpdateEvent, ) (*pbpeerstream.ReplicationMessage_Response, error) { + serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService) + sn := structs.ServiceNameFromString(serviceName) csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) if !ok { + logger.Error("did not increment or decrement exported services count", "service_name", serviceName) return nil, fmt.Errorf("invalid type for service response: %T", update.Result) } @@ -51,9 +55,6 @@ func makeServiceResponse( if err != nil { return nil, fmt.Errorf("failed to marshal: %w", err) } - - serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService) - // If no nodes are present then it's due to one of: // 1. The service is newly registered or exported and yielded a transient empty update. // 2. All instances of the service were de-registered. @@ -61,7 +62,10 @@ func makeServiceResponse( // // We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that. // Case #1 is a no-op for the importing peer. - if len(export.Nodes) == 0 { + if len(csn.Nodes) == 0 { + logger.Trace("decrementing exported services count", "service_name", sn.String()) + mst.RemoveExportedService(sn) + return &pbpeerstream.ReplicationMessage_Response{ ResourceURL: pbpeerstream.TypeURLExportedService, // TODO(peering): Nonce management @@ -71,6 +75,9 @@ func makeServiceResponse( }, nil } + logger.Trace("incrementing exported services count", "service_name", sn.String()) + mst.TrackExportedService(sn) + // If there are nodes in the response, we push them as an UPSERT operation. 
return &pbpeerstream.ReplicationMessage_Response{ ResourceURL: pbpeerstream.TypeURLExportedService, diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 26d5a7b00..c67f7da04 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -424,7 +424,7 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { var resp *pbpeerstream.ReplicationMessage_Response switch { case strings.HasPrefix(update.CorrelationID, subExportedService): - resp, err = makeServiceResponse(logger, update) + resp, err = makeServiceResponse(logger, status, update) if err != nil { // Log the error and skip this response to avoid locking up peering due to a bad update event. logger.Error("failed to create service response", "error", err) diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 1e3117ecc..d5f9e2c36 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -22,6 +22,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" @@ -1006,6 +1007,53 @@ func (b *testStreamBackend) CatalogDeregister(req *structs.DeregisterRequest) er return nil } +func Test_makeServiceResponse_ExportedServicesCount(t *testing.T) { + peerName := "billing" + peerID := "1fabcd52-1d46-49b0-b1d8-71559aee47f5" + + srv, store := newTestServer(t, nil) + require.NoError(t, store.PeeringWrite(31, &pbpeering.Peering{ + ID: peerID, + Name: peerName}, + )) + + // connect the stream + mst, err := srv.Tracker.Connected(peerID) + require.NoError(t, err) + + testutil.RunStep(t, "simulate an update to export a service", func(t *testing.T) { + update := cache.UpdateEvent{ + CorrelationID: subExportedService + "api", + Result: &pbservice.IndexedCheckServiceNodes{ + Nodes: []*pbservice.CheckServiceNode{ + { + Service: &pbservice.NodeService{ + ID: "api-1", + Service: "api", + PeerName: peerName, + }, + }, + }, + }} + _, err := makeServiceResponse(srv.Logger, mst, update) + require.NoError(t, err) + + require.Equal(t, 1, mst.GetExportedServicesCount()) + }) + + testutil.RunStep(t, "simulate a delete for an exported service", func(t *testing.T) { + update := cache.UpdateEvent{ + CorrelationID: subExportedService + "api", + Result: &pbservice.IndexedCheckServiceNodes{ + Nodes: []*pbservice.CheckServiceNode{}, + }} + _, err := makeServiceResponse(srv.Logger, mst, update) + require.NoError(t, err) + + require.Equal(t, 0, mst.GetExportedServicesCount()) + }) +} + func Test_processResponse_Validation(t *testing.T) { peerName := "billing" peerID := "1fabcd52-1d46-49b0-b1d8-71559aee47f5" diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index 4244bbe09..40d8ecb63 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -166,9 +166,11 @@ type Status struct { // - The last error message when receiving from the stream. 
LastReceiveErrorMessage string - // TODO(peering): consider keeping track of imported service counts thru raft - // ImportedServices is set that keeps track of which service names are imported for the peer + // TODO(peering): consider keeping track of imported and exported services thru raft + // ImportedServices keeps track of which service names are imported for the peer ImportedServices map[string]struct{} + // ExportedServices keeps track of which service names a peer asks to export + ExportedServices map[string]struct{} } func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { @@ -274,3 +276,28 @@ func (s *MutableStatus) GetImportedServicesCount() int { return len(s.ImportedServices) } + +func (s *MutableStatus) RemoveExportedService(sn structs.ServiceName) { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.ExportedServices, sn.String()) +} + +func (s *MutableStatus) TrackExportedService(sn structs.ServiceName) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.ExportedServices == nil { + s.ExportedServices = make(map[string]struct{}) + } + + s.ExportedServices[sn.String()] = struct{}{} +} + +func (s *MutableStatus) GetExportedServicesCount() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.ExportedServices) +} diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 47e39c2b7..845818eb2 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -346,6 +346,7 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ s.Logger.Trace("did not find peer in stream tracker when reading peer", "peerID", peering.ID) } else { cp.ImportedServiceCount = uint64(len(st.ImportedServices)) + cp.ExportedServiceCount = uint64(len(st.ExportedServices)) } return &pbpeering.PeeringReadResponse{Peering: cp}, nil @@ -386,6 +387,7 @@ func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequ s.Logger.Trace("did not find peer in stream tracker when listing peers", "peerID", p.ID) } else { cp.ImportedServiceCount = uint64(len(st.ImportedServices)) + cp.ExportedServiceCount = uint64(len(st.ExportedServices)) } cPeerings = append(cPeerings, cp) From 84b458149fad88e21d1cace0660cc6a29ee9d683 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Mon, 18 Jul 2022 10:34:59 -0700 Subject: [PATCH 032/107] fix leader annotation (#13786) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- internal/tools/proto-gen-rpc-glue/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/tools/proto-gen-rpc-glue/main.go b/internal/tools/proto-gen-rpc-glue/main.go index ec1f2d93c..22e1f8980 100644 --- a/internal/tools/proto-gen-rpc-glue/main.go +++ b/internal/tools/proto-gen-rpc-glue/main.go @@ -109,7 +109,7 @@ func processFile(path string) error { log.Printf(" ReadTODO from %s", ann.ReadTODO) } if ann.LeaderReadTODO != "" { - log.Printf(" LeaderReadTODO from %s", ann.ReadTODO) + log.Printf(" LeaderReadTODO from %s", ann.LeaderReadTODO) } if ann.WriteTODO != "" { log.Printf(" WriteTODO from %s", ann.WriteTODO) @@ -161,7 +161,7 @@ var _ time.Month buf.WriteString(fmt.Sprintf(tmplDatacenter, typ.Name, typ.Annotation.Datacenter)) } if typ.Annotation.LeaderReadTODO != "" { - buf.WriteString(fmt.Sprintf(tmplLeaderOnlyReadTODO, typ.Name, typ.Annotation.ReadTODO)) + buf.WriteString(fmt.Sprintf(tmplLeaderOnlyReadTODO, typ.Name, typ.Annotation.LeaderReadTODO)) } if typ.Annotation.ReadTODO != "" { buf.WriteString(fmt.Sprintf(tmplReadTODO, typ.Name, 
typ.Annotation.ReadTODO)) From 47c3a92711c70b142610974f819bec7f29cf5979 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 15 Jul 2022 16:05:53 -0700 Subject: [PATCH 033/107] Fix panic on acl token read with -self and -expanded --- .changelog/13787.txt | 3 +++ command/acl/token/read/token_read.go | 7 +++++++ 2 files changed, 10 insertions(+) create mode 100644 .changelog/13787.txt diff --git a/.changelog/13787.txt b/.changelog/13787.txt new file mode 100644 index 000000000..0682d70c4 --- /dev/null +++ b/.changelog/13787.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: when `acl token read` is used with the `-self` and `-expanded` flags, return an error instead of panicking +``` diff --git a/command/acl/token/read/token_read.go b/command/acl/token/read/token_read.go index 4e66d9ea7..e5a3b87b0 100644 --- a/command/acl/token/read/token_read.go +++ b/command/acl/token/read/token_read.go @@ -92,6 +92,13 @@ func (c *cmd) Run(args []string) int { return 1 } } else { + // TODO: consider updating this CLI command and underlying HTTP API endpoint + // to support expanded read of a "self" token, which is a much better user workflow. + if c.expanded { + c.UI.Error("Cannot use both -expanded and -self. Instead, use -expanded and -id=.") + return 1 + } + t, _, err = client.ACL().TokenReadSelf(nil) if err != nil { c.UI.Error(fmt.Sprintf("Error reading token: %v", err)) From eba682fc082b8cc37edc1eeb3a817440d165bc3e Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Mon, 18 Jul 2022 16:12:03 -0700 Subject: [PATCH 034/107] peerstream: set keepalive enforcement to 15s (#13796) The client is set to send keepalive pings every 30s. The server keepalive enforcement must be set to a number less than that, otherwise it will disconnect clients for sending pings too often. MinTime governs the minimum amount of time between pings. --- agent/grpc-external/server.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/agent/grpc-external/server.go b/agent/grpc-external/server.go index 606dba642..751cca91c 100644 --- a/agent/grpc-external/server.go +++ b/agent/grpc-external/server.go @@ -5,6 +5,8 @@ import ( recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "time" agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware" "github.com/hashicorp/consul/tlsutil" @@ -25,6 +27,12 @@ func NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S // Add middlware interceptors to recover in case of panics. recovery.StreamServerInterceptor(recoveryOpts...), ), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + // This must be less than the keealive.ClientParameters Time setting, otherwise + // the server will disconnect the client for sending too many keepalive pings. + // Currently the client param is set to 30s. + MinTime: 15 * time.Second, + }), } if tls != nil && tls.GRPCTLSConfigured() { creds := credentials.NewTLS(tls.IncomingGRPCConfig()) From 7bfe4dd02078bd7facb751d46f52305c7b6916f1 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Tue, 19 Jul 2022 15:06:11 +0200 Subject: [PATCH 035/107] ui: chore - fix CI test-suite (#13799) * fix linting issue * Update datacenter selector page-object to not include separator. 
* change non-valid li to div for single dc name --- .../app/components/consul/datacenter/selector/index.hbs | 5 ++++- .../consul-ui/app/components/hashicorp-consul/pageobject.js | 2 +- .../tests/integration/components/hashicorp-consul-test.js | 3 +-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs b/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs index f54b4606b..be1c33da4 100644 --- a/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/datacenter/selector/index.hbs @@ -27,6 +27,7 @@ {{#each menu.items as |item|}} {{else}} -
    • {{@dcs.firstObject.Name}}
    • +
      + {{@dcs.firstObject.Name}} +
      {{/if}} diff --git a/ui/packages/consul-ui/app/components/hashicorp-consul/pageobject.js b/ui/packages/consul-ui/app/components/hashicorp-consul/pageobject.js index aa0e1d572..3bdada40e 100644 --- a/ui/packages/consul-ui/app/components/hashicorp-consul/pageobject.js +++ b/ui/packages/consul-ui/app/components/hashicorp-consul/pageobject.js @@ -50,7 +50,7 @@ export default (collection, clickable, attribute, is, authForm, emptyState) => s ':checked', '[data-test-nspace-menu] > input[type="checkbox"]' ); - page.navigation.dcs = collection('[data-test-datacenter-menu] li', { + page.navigation.dcs = collection('[data-test-datacenter-menu] [data-test-dc-item]', { name: clickable('a'), }); return page; diff --git a/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js b/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js index 58a75c94b..2f611c552 100644 --- a/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js +++ b/ui/packages/consul-ui/tests/integration/components/hashicorp-consul-test.js @@ -1,7 +1,6 @@ -import { module, skip, test } from 'qunit'; +import { module, skip } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import hbs from 'htmlbars-inline-precompile'; -import { render } from '@ember/test-helpers'; module('Integration | Component | hashicorp consul', function(hooks) { setupRenderingTest(hooks); From a0640aa69699bdc44df8129a9eb49b014bbd1969 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Tue, 19 Jul 2022 09:16:24 -0700 Subject: [PATCH 036/107] makefile: give better error for tool installed by wrong package (#13797) I had protoc-gen-go installed through `google.golang.org/protobuf` instead of `github.com/golang/protobuf` and `make proto` was failing silently. This change will ensure you get an error: ``` protoc-gen-go is already installed by module "google.golang.org/protobuf" but should be installed by module "github.com/golang/protobuf". Delete it and re-run to re-install. ``` --- build-support/scripts/devtools.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/build-support/scripts/devtools.sh b/build-support/scripts/devtools.sh index 1d2078abc..7e1521502 100755 --- a/build-support/scripts/devtools.sh +++ b/build-support/scripts/devtools.sh @@ -188,8 +188,14 @@ function install_versioned_tool { fi if command -v "${command}" &>/dev/null ; then - got="$(go version -m $(which "${command}") | grep '\bmod\b' | grep "${module}" | - awk '{print $2 "@" $3}')" + mod_line="$(go version -m "$(which "${command}")" | grep '\smod\s')" + act_mod=$(echo "${mod_line}" | awk '{print $2}') + if [[ "$module" != "$act_mod" ]]; then + err "${command} is already installed by module \"${act_mod}\" but should be installed by module \"${module}\". Delete it and re-run to re-install." 
+ return 1 + fi + + got="$(echo "${mod_line}" | grep "${module}" | awk '{print $2 "@" $3}')" if [[ "$expect" != "$got" ]]; then should_install=1 install_reason="upgrade" From 6b653fb82784e3ba4e55d13a6fc5b210b8d5a4ac Mon Sep 17 00:00:00 2001 From: Ranjandas Date: Wed, 20 Jul 2022 02:45:41 +1000 Subject: [PATCH 037/107] Update Single DC Multi K8S doc (#13278) * Updated note with details of various K8S CNI options Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- .../single-dc-multi-k8s.mdx | 109 ++++++++++-------- 1 file changed, 59 insertions(+), 50 deletions(-) diff --git a/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx b/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx index b6c680cb2..d27c23fed 100644 --- a/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx +++ b/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx @@ -6,15 +6,22 @@ description: Single Consul Datacenter deployed in multiple Kubernetes clusters # Single Consul Datacenter in Multiple Kubernetes Clusters --> Requires consul-helm v0.32.1 or higher. +This page describes deploying a single Consul datacenter in multiple Kubernetes clusters, +with servers and clients running in one cluster and only clients in the rest of the clusters. +This example uses two Kubernetes clusters, but this approach could be extended to using more than two. -This page describes how to deploy a single Consul datacenter in multiple Kubernetes clusters, -with both servers and clients running in one cluster, and only clients running in the rest of the clusters. -In this example, we will use two Kubernetes clusters, but this approach could be extended to using more than two. +## Requirements + +* Consul-Helm version `v0.32.1` or higher +* This deployment topology requires that the Kubernetes clusters have a flat network +for both pods and nodes so that pods or nodes from one cluster can connect +to pods or nodes in another. In many hosted Kubernetes environments, this may have to be explicitly configured based on the hosting provider's network. Refer to the following documentation for instructions: + * [Azure AKS CNI](https://docs.microsoft.com/en-us/azure/aks/concepts-network#azure-cni-advanced-networking) + * [AWS EKS CNI](https://docs.aws.amazon.com/eks/latest/userguide/pod-networking.html) + * [GKE VPC-native clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips). + +If a flat network is unavailable across all Kubernetes clusters, follow the instructions for using [Admin Partitions](/docs/enterprise/admin-partitions), which is a Consul Enterprise feature. -~> **Note:** This deployment topology requires that your Kubernetes clusters have a flat network -for both pods and nodes, so that pods or nodes from one cluster can connect -to pods or nodes in another. If a flat network is not available across all Kubernetes clusters, follow the instructions for using [Admin Partitions](/docs/enterprise/admin-partitions), which is a Consul Enterprise feature. ## Prepare Helm release name ahead of installs @@ -23,7 +30,7 @@ The Helm chart uses the Helm release name as a prefix for the ACL resources that it creates, such as tokens and auth methods. If the names of the Helm releases are identical, subsequent Consul on Kubernetes clusters overwrite existing ACL resources and cause the clusters to fail. 
-Before you proceed with installation, prepare the Helm release names as environment variables for both the server and client installs to use. +Before proceeding with installation, prepare the Helm release names as environment variables for both the server and client install. ```shell-session $ export HELM_RELEASE_SERVER=server @@ -34,8 +41,7 @@ Before you proceed with installation, prepare the Helm release names as environm ## Deploying Consul servers and clients in the first cluster -First, we will deploy the Consul servers with Consul clients in the first cluster. -For that, we will use the following Helm configuration: +First, deploy the first cluster with Consul Servers and Clients with the example Helm configuration below. @@ -61,30 +67,30 @@ ui: -Note that we are deploying in a secure configuration, with gossip encryption, -TLS for all components, and ACLs. We are enabling the Consul Service Mesh and the controller for CRDs -so that we can use them to later verify that our services can connect with each other across clusters. +Note that this will deploy a secure configuration with gossip encryption, +TLS for all components and ACLs. In addition, this will enable the Consul Service Mesh and the controller for CRDs +that can be used later to verify the connectivity of services across clusters. -We're also setting UI's service type to be `NodePort`. -This is needed so that we can connect to servers from another cluster without using the pod IPs of the servers, +The UI's service type is set to be `NodePort`. +This is needed to connect to servers from another cluster without using the pod IPs of the servers, which are likely going to change. -To deploy, first we need to generate the Gossip encryption key and save it as a Kubernetes secret. +To deploy, first generate the Gossip encryption key and save it as a Kubernetes secret. ```shell $ kubectl create secret generic consul-gossip-encryption-key --from-literal=key=$(consul keygen) ``` -Now we can install our Consul cluster with Helm: +Now install Consul cluster with Helm: ```shell-session $ helm install ${HELM_RELEASE_SERVER} --values cluster1-config.yaml hashicorp/consul ``` -Once the installation finishes and all components are running and ready, -we need to extract the gossip encryption key we've created, the CA certificate -and the ACL bootstrap token generated during installation, -so that we can apply them to our second Kubernetes cluster. +Once the installation finishes and all components are running and ready, the following information needs to be extracted (using the below command) and applied to the second Kubernetes cluster. + * The Gossip encryption key created + * The CA certificate generated during installation + * The ACL bootstrap token generated during installation ```shell-session $ kubectl get secret consul-gossip-encryption-key ${HELM_RELEASE_SERVER}-consul-ca-cert ${HELM_RELEASE_SERVER}-consul-bootstrap-acl-token --output yaml > cluster1-credentials.yaml @@ -93,15 +99,19 @@ $ kubectl get secret consul-gossip-encryption-key ${HELM_RELEASE_SERVER}-consul- ## Deploying Consul clients in the second cluster ~> **Note:** If multiple Kubernetes clusters will be joined to the Consul Datacenter, then the following instructions will need to be repeated for each additional Kubernetes cluster. -Now we can switch to the second Kubernetes cluster where we will deploy only the Consul clients +Switch to the second Kubernetes cluster where Consul clients will be deployed that will join the first Consul cluster. 
-First, we need to apply credentials we've extracted from the first cluster to the second cluster: +```shell-session +$ kubectl config use-context +``` + +First, apply the credentials extracted from the first cluster to the second cluster: ```shell-session $ kubectl apply --filename cluster1-credentials.yaml ``` -To deploy in the second cluster, we will use the following Helm configuration: +To deploy in the second cluster, the following example Helm configuration will be used: @@ -145,14 +155,12 @@ connectInject: -Note that we're referencing secrets from the first cluster in ACL, gossip, and TLS configuration. - -Next, we need to set up the `externalServers` configuration. +Note the references to the secrets extracted and applied from the first cluster in ACL, gossip, and TLS configuration. The `externalServers.hosts` and `externalServers.httpsPort` refer to the IP and port of the UI's NodePort service deployed in the first cluster. Set the `externalServers.hosts` to any Node IP of the first cluster, -which you can see by running `kubectl get nodes --output wide`. +which can be seen by running `kubectl get nodes --output wide`. Set `externalServers.httpsPort` to the `nodePort` of the `cluster1-consul-ui` service. In our example, the port is `31557`. @@ -162,37 +170,37 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE cluster1-consul-ui NodePort 10.0.240.80 443:31557/TCP 40h ``` -We set the `externalServer.tlsServerName` to `server.dc1.consul`. This the DNS SAN +Set the `externalServer.tlsServerName` to `server.dc1.consul`. This the DNS SAN (Subject Alternative Name) that is present in the Consul server's certificate. -We need to set it because we're connecting to the Consul servers over the node IP, +This is required because the connection to the Consul servers uses the node IP, but that IP isn't present in the server's certificate. -To make sure that the hostname verification succeeds during the TLS handshake, we need to set the TLS +To make sure that the hostname verification succeeds during the TLS handshake, set the TLS server name to a DNS name that *is* present in the certificate. -Next, we need to set `externalServers.k8sAuthMethodHost` to the address of the second Kubernetes API server. -This should be the address that is reachable from the first cluster, and so it cannot be the internal DNS +Next, set `externalServers.k8sAuthMethodHost` to the address of the second Kubernetes API server. +This should be the address that is reachable from the first cluster, so it cannot be the internal DNS available in each Kubernetes cluster. Consul needs it so that `consul login` with the Kubernetes auth method will work from the second cluster. More specifically, the Consul server will need to perform the verification of the Kubernetes service account -whenever `consul login` is called, and to verify service accounts from the second cluster it needs to +whenever `consul login` is called, and to verify service accounts from the second cluster, it needs to reach the Kubernetes API in that cluster. -The easiest way to get it is to set it from your `kubeconfig` by running `kubectl config view` and grabbing +The easiest way to get it is from the `kubeconfig` by running `kubectl config view` and grabbing the value of `cluster.server` for the second cluster. -Lastly, we need to set up the clients so that they can discover the servers in the first cluster. -For this, we will use Consul's cloud auto-join feature -for the [Kubernetes provider](/docs/install/cloud-auto-join#kubernetes-k8s). 
-To use it we need to provide a way for the Consul clients to reach the first Kubernetes cluster. -To do that, we need to save the `kubeconfig` for the first cluster as a Kubernetes secret in the second cluster -and reference it in the `clients.join` value. Note that we're making that secret available to the client pods +Lastly, set up the clients so that they can discover the servers in the first cluster. +For this, Consul's cloud auto-join feature +for the [Kubernetes provider](/docs/install/cloud-auto-join#kubernetes-k8s) can be used. + +This can be configured by saving the `kubeconfig` for the first cluster as a Kubernetes secret in the second cluster +and referencing it in the `clients.join` value. Note that the secret is made available to the client pods by setting it in `client.extraVolumes`. -~> **Note:** The kubeconfig you're providing to the client should have minimal permissions. +~> **Note:** The kubeconfig provided to the client should have minimal permissions. The cloud auto-join provider will only need permission to read pods. Please see [Kubernetes Cloud auto-join](/docs/install/cloud-auto-join#kubernetes-k8s) for more details. -Now we're ready to install! +Now, proceed with the installation of the second cluster. ```shell-session $ helm install ${HELM_RELEASE_CLIENT} --values cluster2-config.yaml hashicorp/consul @@ -200,12 +208,11 @@ $ helm install ${HELM_RELEASE_CLIENT} --values cluster2-config.yaml hashicorp/co ## Verifying the Consul Service Mesh works -~> When Transparent proxy is enabled, services in one Kubernetes cluster that need to communicate with a service in another Kubernetes cluster must have a explicit upstream configured through the ["consul.hashicorp.com/connect-service-upstreams"](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. +~> When Transparent proxy is enabled, services in one Kubernetes cluster that need to communicate with a service in another Kubernetes cluster must have an explicit upstream configured through the ["consul.hashicorp.com/connect-service-upstreams"](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. -Now that we have our Consul cluster in multiple k8s clusters up and running, we will -deploy two services and verify that they can connect to each other. +Now that the Consul cluster spanning across multiple k8s clusters is up and running, deploy two services in separate k8s clusters and verify that they can connect to each other. -First, we'll deploy `static-server` service in the first cluster: +First, deploy `static-server` service in the first cluster: @@ -271,9 +278,9 @@ spec: -Note that we're defining a Service intention so that our services are allowed to talk to each other. +Note that defining a Service intention is required so that our services are allowed to talk to each other. -Then we'll deploy `static-client` in the second cluster with the following configuration: +Next, deploy `static-client` in the second cluster with the following configuration: @@ -321,9 +328,11 @@ spec: -Once both services are up and running, we can connect to the `static-server` from `static-client`: +Once both services are up and running, try connecting to the `static-server` from `static-client`: ```shell-session $ kubectl exec deploy/static-client -- curl --silent localhost:1234 "hello world" ``` + +A successful installation would return `hello world` for the above curl command output. 
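One mechanism referenced above that is easy to gloss over: `externalServers.k8sAuthMethodHost` matters because the Consul servers call back to the second cluster's Kubernetes API to validate service account tokens whenever `consul login` runs there. The sketch below illustrates roughly what that login step does through the Go `api` package; consul-k8s performs it automatically, and the auth method name and token path here are assumptions for illustration only.

```go
// Sketch only: roughly what `consul login` against a Kubernetes auth method does.
// consul-k8s runs this automatically; the auth method name and token path below
// are illustrative assumptions, not values from this patch.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The pod's service account JWT. The Consul servers verify it by calling the
	// Kubernetes API reachable at externalServers.k8sAuthMethodHost.
	jwt, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		log.Fatal(err)
	}

	token, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  "consul-k8s-component-auth-method", // assumed name
		BearerToken: string(jwt),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("issued ACL token:", token.AccessorID)
}
```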
From 64b3705a3181be14c176f437f70edc0c0ffd2dd2 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Tue, 19 Jul 2022 11:43:29 -0700 Subject: [PATCH 038/107] peering: refactor reconcile, cleanup (#13795) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/consul/leader_peering_test.go | 32 +++++--- .../services/peerstream/replication.go | 6 -- .../services/peerstream/stream_tracker.go | 8 ++ agent/rpc/peering/service.go | 80 +++++++------------ api/peering_test.go | 4 +- 5 files changed, 59 insertions(+), 71 deletions(-) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index c21156b42..c3196a54e 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -747,10 +747,11 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { /// finished adding services type testCase struct { - name string - description string - exportedService structs.ExportedServicesConfigEntry - expectedImportedExportedServicesCount uint64 // same count for a server that imports the services form a server that exports them + name string + description string + exportedService structs.ExportedServicesConfigEntry + expectedImportedServsCount uint64 + expectedExportedServsCount uint64 } testCases := []testCase{ @@ -770,7 +771,8 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { }, }, }, - expectedImportedExportedServicesCount: 4, // 3 services from above + the "consul" service + expectedImportedServsCount: 4, // 3 services from above + the "consul" service + expectedExportedServsCount: 4, // 3 services from above + the "consul" service }, { name: "no sync", @@ -778,7 +780,8 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { exportedService: structs.ExportedServicesConfigEntry{ Name: "default", }, - expectedImportedExportedServicesCount: 0, // we want to see this decremented from 4 --> 0 + expectedImportedServsCount: 0, // we want to see this decremented from 4 --> 0 + expectedExportedServsCount: 0, // we want to see this decremented from 4 --> 0 }, { name: "just a, b services", @@ -804,7 +807,8 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { }, }, }, - expectedImportedExportedServicesCount: 2, + expectedImportedServsCount: 2, + expectedExportedServsCount: 2, }, { name: "unexport b service", @@ -822,7 +826,8 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { }, }, }, - expectedImportedExportedServicesCount: 1, + expectedImportedServsCount: 1, + expectedExportedServsCount: 1, }, { name: "export c service", @@ -848,7 +853,8 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { }, }, }, - expectedImportedExportedServicesCount: 2, + expectedImportedServsCount: 2, + expectedExportedServsCount: 2, }, } @@ -872,13 +878,13 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { resp, err := peeringClient2.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s1"}) require.NoError(r, err) require.NotNil(r, resp.Peering) - require.Equal(r, tc.expectedImportedExportedServicesCount, resp.Peering.ImportedServiceCount) + require.Equal(r, tc.expectedImportedServsCount, resp.Peering.ImportedServiceCount) // on List resp2, err2 := peeringClient2.PeeringList(ctx, &pbpeering.PeeringListRequest{}) require.NoError(r, err2) require.NotEmpty(r, resp2.Peerings) - require.Equal(r, tc.expectedImportedExportedServicesCount, resp2.Peerings[0].ImportedServiceCount) + require.Equal(r, 
tc.expectedExportedServsCount, resp2.Peerings[0].ImportedServiceCount) }) // Check that exported services count on S1 are what we expect @@ -887,13 +893,13 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { resp, err := peeringClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s2"}) require.NoError(r, err) require.NotNil(r, resp.Peering) - require.Equal(r, tc.expectedImportedExportedServicesCount, resp.Peering.ExportedServiceCount) + require.Equal(r, tc.expectedImportedServsCount, resp.Peering.ExportedServiceCount) // on List resp2, err2 := peeringClient.PeeringList(ctx, &pbpeering.PeeringListRequest{}) require.NoError(r, err2) require.NotEmpty(r, resp2.Peerings) - require.Equal(r, tc.expectedImportedExportedServicesCount, resp2.Peerings[0].ExportedServiceCount) + require.Equal(r, tc.expectedExportedServsCount, resp2.Peerings[0].ExportedServiceCount) }) }) } diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go index 938605cac..be79a23bd 100644 --- a/agent/grpc-external/services/peerstream/replication.go +++ b/agent/grpc-external/services/peerstream/replication.go @@ -43,7 +43,6 @@ func makeServiceResponse( sn := structs.ServiceNameFromString(serviceName) csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) if !ok { - logger.Error("did not increment or decrement exported services count", "service_name", serviceName) return nil, fmt.Errorf("invalid type for service response: %T", update.Result) } @@ -63,7 +62,6 @@ func makeServiceResponse( // We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that. // Case #1 is a no-op for the importing peer. if len(csn.Nodes) == 0 { - logger.Trace("decrementing exported services count", "service_name", sn.String()) mst.RemoveExportedService(sn) return &pbpeerstream.ReplicationMessage_Response{ @@ -75,7 +73,6 @@ func makeServiceResponse( }, nil } - logger.Trace("incrementing exported services count", "service_name", sn.String()) mst.TrackExportedService(sn) // If there are nodes in the response, we push them as an UPSERT operation. 
@@ -220,7 +217,6 @@ func (s *Server) handleUpsert( return fmt.Errorf("did not increment imported services count for service=%q: %w", sn.String(), err) } - logger.Trace("incrementing imported services count", "service_name", sn.String()) mutableStatus.TrackImportedService(sn) return nil @@ -468,11 +464,9 @@ func (s *Server) handleDelete( err := s.handleUpdateService(peerName, partition, sn, nil) if err != nil { - logger.Error("did not decrement imported services count", "service_name", sn.String(), "error", err) return err } - logger.Trace("decrementing imported services count", "service_name", sn.String()) mutableStatus.RemoveImportedService(sn) return nil diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index 40d8ecb63..c9d41e127 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -173,6 +173,14 @@ type Status struct { ExportedServices map[string]struct{} } +func (s *Status) GetImportedServicesCount() uint64 { + return uint64(len(s.ImportedServices)) +} + +func (s *Status) GetExportedServicesCount() uint64 { + return uint64(len(s.ExportedServices)) +} + func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { return &MutableStatus{ Status: Status{ diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 845818eb2..4b7d051bc 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -13,6 +13,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" @@ -338,17 +339,7 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ return &pbpeering.PeeringReadResponse{Peering: nil}, nil } - cp := copyPeeringWithNewState(peering, s.reconciledStreamStateHint(peering.ID, peering.State)) - - // add imported services count - st, found := s.Tracker.StreamStatus(peering.ID) - if !found { - s.Logger.Trace("did not find peer in stream tracker when reading peer", "peerID", peering.ID) - } else { - cp.ImportedServiceCount = uint64(len(st.ImportedServices)) - cp.ExportedServiceCount = uint64(len(st.ExportedServices)) - } - + cp := s.reconcilePeering(peering) return &pbpeering.PeeringReadResponse{Peering: cp}, nil } @@ -379,34 +370,38 @@ func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequ // reconcile the actual peering state; need to copy over the ds for peering var cPeerings []*pbpeering.Peering for _, p := range peerings { - cp := copyPeeringWithNewState(p, s.reconciledStreamStateHint(p.ID, p.State)) - - // add imported services count - st, found := s.Tracker.StreamStatus(p.ID) - if !found { - s.Logger.Trace("did not find peer in stream tracker when listing peers", "peerID", p.ID) - } else { - cp.ImportedServiceCount = uint64(len(st.ImportedServices)) - cp.ExportedServiceCount = uint64(len(st.ExportedServices)) - } - + cp := s.reconcilePeering(p) cPeerings = append(cPeerings, cp) } + return &pbpeering.PeeringListResponse{Peerings: cPeerings}, nil } -// TODO(peering): Maybe get rid of this when actually monitoring the stream health -// reconciledStreamStateHint peaks into the streamTracker and determines whether a peering should be marked -// as PeeringState.Active or not -func (s *Server) reconciledStreamStateHint(pID string, pState 
pbpeering.PeeringState) pbpeering.PeeringState { - streamState, found := s.Tracker.StreamStatus(pID) +// TODO(peering): Get rid of this func when we stop using the stream tracker for imported/ exported services and the peering state +// reconcilePeering enriches the peering with the following information: +// -- PeeringState.Active if the peering is active +// -- ImportedServicesCount and ExportedServicesCount +// NOTE: we return a new peering with this additional data +func (s *Server) reconcilePeering(peering *pbpeering.Peering) *pbpeering.Peering { + streamState, found := s.Tracker.StreamStatus(peering.ID) + if !found { + s.Logger.Warn("did not find peer in stream tracker; cannot populate imported and"+ + " exported services count or reconcile peering state", "peerID", peering.ID) + return peering + } else { + cp := copyPeering(peering) - if found && streamState.Connected { - return pbpeering.PeeringState_ACTIVE + // reconcile pbpeering.PeeringState_Active + if streamState.Connected { + cp.State = pbpeering.PeeringState_ACTIVE + } + + // add imported & exported services counts + cp.ImportedServiceCount = streamState.GetImportedServicesCount() + cp.ExportedServiceCount = streamState.GetExportedServicesCount() + + return cp } - - // default, no reconciliation - return pState } // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. @@ -605,22 +600,9 @@ func (s *Server) getExistingOrCreateNewPeerID(peerName, partition string) (strin return id, nil } -func copyPeeringWithNewState(p *pbpeering.Peering, state pbpeering.PeeringState) *pbpeering.Peering { - return &pbpeering.Peering{ - ID: p.ID, - Name: p.Name, - Partition: p.Partition, - DeletedAt: p.DeletedAt, - Meta: p.Meta, - PeerID: p.PeerID, - PeerCAPems: p.PeerCAPems, - PeerServerAddresses: p.PeerServerAddresses, - PeerServerName: p.PeerServerName, - CreateIndex: p.CreateIndex, - ModifyIndex: p.ModifyIndex, - ImportedServiceCount: p.ImportedServiceCount, - ExportedServiceCount: p.ExportedServiceCount, +func copyPeering(p *pbpeering.Peering) *pbpeering.Peering { + var copyP pbpeering.Peering + proto.Merge(©P, p) - State: state, - } + return ©P } diff --git a/api/peering_test.go b/api/peering_test.go index 1c022a9cf..fcd7c5b3c 100644 --- a/api/peering_test.go +++ b/api/peering_test.go @@ -152,9 +152,7 @@ func TestAPI_Peering_GenerateToken_Read_Establish_Delete(t *testing.T) { }) defer s2.Stop() - testutil.RunStep(t, "register services to get synced dc2", func(t *testing.T) { - testNodeServiceCheckRegistrations(t, c2, "dc2") - }) + testNodeServiceCheckRegistrations(t, c2, "dc2") ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() From dcc230f6996d700c2a43fdae02310ed9936eb033 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Tue, 19 Jul 2022 14:56:28 -0400 Subject: [PATCH 039/107] Make envoy resources for inferred peered upstreams (#13758) Peered upstreams has a separate loop in xds from discovery chain upstreams. This PR adds similar but slightly modified code to add filters for peered upstream listeners, clusters, and endpoints in the case of transparent proxy. 
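The "separate loop" described above has roughly the following shape. This is an illustrative sketch built on the snapshot helpers touched in this patch (`PeeredUpstreamIDs`, `PeerUpstreamEndpoints`, `UpstreamPeerMeta`), not the actual xDS generator code, and it only collects SNIs where the real code emits Envoy clusters, endpoints, and transparent-proxy filter chains.

```go
// Sketch only: the shape of the peered-upstream loop the xDS generators add for
// transparent proxy, using snapshot helpers that appear in the diff below. The real
// code emits Envoy clusters, endpoints, and filter chains; this collects SNIs only.
package xdssketch

import "github.com/hashicorp/consul/agent/proxycfg"

func peeredUpstreamSNIs(cfgSnap *proxycfg.ConfigSnapshot) []string {
	var snis []string
	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
		// Skip upstreams whose imported endpoints have not been stored yet.
		if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid); !ok {
			continue
		}
		// PeerMeta carries the SNI(s) used to dial the upstream through the peer;
		// transparent-proxy filter chains are matched on the service's virtual IPs.
		peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
		snis = append(snis, peerMeta.SNI...)
	}
	return snis
}
```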
--- agent/proxycfg/connect_proxy.go | 4 +- agent/proxycfg/snapshot.go | 18 +- agent/proxycfg/testing_peering.go | 141 ++++++++++++ agent/xds/clusters.go | 5 +- agent/xds/clusters_test.go | 4 - agent/xds/endpoints.go | 1 + agent/xds/listeners.go | 50 ++++- agent/xds/listeners_test.go | 4 - agent/xds/resources_test.go | 8 + ...-proxy-with-peered-upstreams.latest.golden | 207 ++++++++++++++++++ ...-proxy-with-peered-upstreams.latest.golden | 60 +++++ .../endpoints/transparent-proxy.latest.golden | 106 +++++++++ ...-proxy-with-peered-upstreams.latest.golden | 176 +++++++++++++++ ...-proxy-with-peered-upstreams.latest.golden | 5 + .../routes/transparent-proxy.latest.golden | 5 + 15 files changed, 771 insertions(+), 23 deletions(-) create mode 100644 agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden create mode 100644 agent/xds/testdata/endpoints/transparent-proxy-with-peered-upstreams.latest.golden create mode 100644 agent/xds/testdata/endpoints/transparent-proxy.latest.golden create mode 100644 agent/xds/testdata/listeners/transparent-proxy-with-peered-upstreams.latest.golden create mode 100644 agent/xds/testdata/routes/transparent-proxy-with-peered-upstreams.latest.golden create mode 100644 agent/xds/testdata/routes/transparent-proxy.latest.golden diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 823f7d9ef..9b0f3e54b 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -224,7 +224,7 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e } // Check whether a watch for this peer exists to avoid duplicates. - if _, ok := snap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer); !ok { + if ok := snap.ConnectProxy.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok { peerCtx, cancel := context.WithCancel(ctx) if err := s.dataSources.TrustBundle.Notify(peerCtx, &pbpeering.TrustBundleReadRequest{ Name: uid.Peer, @@ -342,7 +342,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s snap.ConnectProxy.PeerUpstreamEndpoints.InitWatch(uid, hcancel) // Check whether a watch for this peer exists to avoid duplicates. - if _, ok := snap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer); !ok { + if ok := snap.ConnectProxy.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok { peerCtx, cancel := context.WithCancel(ctx) if err := s.dataSources.TrustBundle.Notify(peerCtx, &pbpeering.TrustBundleReadRequest{ Name: uid.Peer, diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index b04c67c26..b96994c21 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -838,19 +838,23 @@ func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.Peeri return *csn.Service.Connect.PeerMeta } +// PeeredUpstreamIDs returns a slice of peered UpstreamIDs from explicit config entries +// and implicit imported services. +// Upstreams whose trust bundles have not been stored in the snapshot are ignored. func (u *ConfigSnapshotUpstreams) PeeredUpstreamIDs() []UpstreamID { - out := make([]UpstreamID, 0, len(u.UpstreamConfig)) - for uid := range u.UpstreamConfig { - if uid.Peer == "" { - continue + out := make([]UpstreamID, 0, u.PeerUpstreamEndpoints.Len()) + u.PeerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool { + if _, ok := u.PeerUpstreamEndpoints.Get(uid); !ok { + // uid might exist in the map but if Set hasn't been called, skip for now. 
+ return true } if _, ok := u.UpstreamPeerTrustBundles.Get(uid.Peer); !ok { // The trust bundle for this upstream is not available yet, skip for now. - continue + return true } - out = append(out, uid) - } + return true + }) return out } diff --git a/agent/proxycfg/testing_peering.go b/agent/proxycfg/testing_peering.go index 9b1973c9a..0f20ad6ca 100644 --- a/agent/proxycfg/testing_peering.go +++ b/agent/proxycfg/testing_peering.go @@ -108,3 +108,144 @@ func TestConfigSnapshotPeering(t testing.T) *ConfigSnapshot { }, }) } + +func TestConfigSnapshotPeeringTProxy(t testing.T) *ConfigSnapshot { + // Test two explicitly defined upstreams api-a and noEndpoints + // as well as one implicitly inferred upstream db. + + var ( + noEndpointsUpstream = structs.Upstream{ + DestinationName: "no-endpoints", + DestinationPeer: "peer-a", + LocalBindPort: 1234, + } + noEndpoints = structs.PeeredServiceName{ + ServiceName: structs.NewServiceName("no-endpoints", nil), + Peer: "peer-a", + } + + apiAUpstream = structs.Upstream{ + DestinationName: "api-a", + DestinationPeer: "peer-a", + LocalBindPort: 9090, + } + apiA = structs.PeeredServiceName{ + ServiceName: structs.NewServiceName("api-a", nil), + Peer: "peer-a", + } + + db = structs.PeeredServiceName{ + ServiceName: structs.NewServiceName("db", nil), + Peer: "peer-a", + } + ) + + const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul" + + return TestConfigSnapshot(t, func(ns *structs.NodeService) { + ns.Proxy.Mode = structs.ProxyModeTransparent + ns.Proxy.Upstreams = []structs.Upstream{ + noEndpointsUpstream, + apiAUpstream, + } + }, []UpdateEvent{ + { + CorrelationID: meshConfigEntryID, + Result: &structs.ConfigEntryResponse{ + Entry: nil, + }, + }, + { + CorrelationID: peeredUpstreamsID, + Result: &structs.IndexedPeeredServiceList{ + Services: []structs.PeeredServiceName{ + apiA, + noEndpoints, + db, // implicitly added here + }, + }, + }, + { + CorrelationID: peerTrustBundleIDPrefix + "peer-a", + Result: &pbpeering.TrustBundleReadResponse{ + Bundle: TestPeerTrustBundles(t).Bundles[0], + }, + }, + { + CorrelationID: upstreamPeerWatchIDPrefix + NewUpstreamID(&noEndpointsUpstream).String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: []structs.CheckServiceNode{}, + }, + }, + { + CorrelationID: upstreamPeerWatchIDPrefix + NewUpstreamID(&apiAUpstream).String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + Address: "127.0.0.1", + PeerName: "peer-a", + }, + Service: &structs.NodeService{ + ID: "api-a-1", + Service: "api-a", + PeerName: "peer-a", + Address: "1.2.3.4", + TaggedAddresses: map[string]structs.ServiceAddress{ + "virtual": {Address: "10.0.0.1"}, + structs.TaggedAddressVirtualIP: {Address: "240.0.0.1"}, + }, + Connect: structs.ServiceConnect{ + PeerMeta: &structs.PeeringServiceMeta{ + SNI: []string{ + "api-a.default.default.cloud.external." 
+ peerTrustDomain, + }, + SpiffeID: []string{ + "spiffe://" + peerTrustDomain + "/ns/default/dc/cloud-dc/svc/api-a", + }, + Protocol: "tcp", + }, + }, + }, + }, + }, + }, + }, + { + CorrelationID: upstreamPeerWatchIDPrefix + NewUpstreamIDFromPeeredServiceName(db).String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + Address: "127.0.0.1", + PeerName: "peer-a", + }, + Service: &structs.NodeService{ + ID: "db-1", + Service: "db", + PeerName: "peer-a", + Address: "2.3.4.5", // Expect no endpoint or listener for this address + TaggedAddresses: map[string]structs.ServiceAddress{ + "virtual": {Address: "10.0.0.2"}, + structs.TaggedAddressVirtualIP: {Address: "240.0.0.2"}, + }, + Connect: structs.ServiceConnect{ + PeerMeta: &structs.PeeringServiceMeta{ + SNI: []string{ + "db.default.default.cloud.external." + peerTrustDomain, + }, + SpiffeID: []string{ + "spiffe://" + peerTrustDomain + "/ns/default/dc/cloud-dc/svc/db", + }, + Protocol: "tcp", + }, + }, + }, + }, + }, + }, + }, + }) +} diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 562e7e692..ed7f8af1a 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -134,7 +134,7 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid) - upstreamCluster, err := s.makeUpstreamClusterForPeerService(upstreamCfg, peerMeta, cfgSnap) + upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap) if err != nil { return nil, err } @@ -693,6 +693,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam } func (s *ResourceGenerator) makeUpstreamClusterForPeerService( + uid proxycfg.UpstreamID, upstream *structs.Upstream, peerMeta structs.PeeringServiceMeta, cfgSnap *proxycfg.ConfigSnapshot, @@ -702,8 +703,6 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService( err error ) - uid := proxycfg.NewUpstreamID(upstream) - cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta) if cfg.EnvoyClusterJSON != "" { c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON) diff --git a/agent/xds/clusters_test.go b/agent/xds/clusters_test.go index 96e7615c7..a56853b81 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -609,10 +609,6 @@ func TestClustersFromSnapshot(t *testing.T) { name: "ingress-multiple-listeners-duplicate-service", create: proxycfg.TestConfigSnapshotIngress_MultipleListenersDuplicateService, }, - { - name: "transparent-proxy", - create: proxycfg.TestConfigSnapshotTransparentProxy, - }, { name: "transparent-proxy-catalog-destinations-only", create: proxycfg.TestConfigSnapshotTransparentProxyCatalogDestinationsOnly, diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index edfe1c616..8fda9adc2 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -3,6 +3,7 @@ package xds import ( "errors" "fmt" + envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 0ef16899f..5b7b0d61f 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -264,8 +264,9 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. 
} outboundListener.ListenerFilters = append(outboundListener.ListenerFilters, tlsInspector) } - // Looping over explicit upstreams is only needed for cross-peer because - // they do not have discovery chains. + + // Looping over explicit and implicit upstreams is only needed for cross-peer + // because they do not have discovery chains. for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() { upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid] @@ -326,7 +327,50 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. // Below we create a filter chain per upstream, rather than a listener per upstream // as we do for explicit upstreams above. - // TODO(peering): tproxy + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + routeName: uid.EnvoyID(), + clusterName: clusterName, + filterName: uid.EnvoyID(), + protocol: cfg.Protocol, + useRDS: false, + }) + if err != nil { + return nil, err + } + + endpoints, _ := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid) + uniqueAddrs := make(map[string]struct{}) + + // Match on the virtual IP for the upstream service (identified by the chain's ID). + // We do not match on all endpoints here since it would lead to load balancing across + // all instances when any instance address is dialed. + for _, e := range endpoints { + if vip := e.Service.TaggedAddresses[structs.TaggedAddressVirtualIP]; vip.Address != "" { + uniqueAddrs[vip.Address] = struct{}{} + } + + // The virtualIPTag is used by consul-k8s to store the ClusterIP for a service. + // For services imported from a peer,the partition will be equal in all cases. + if acl.EqualPartitions(e.Node.PartitionOrDefault(), cfgSnap.ProxyID.PartitionOrDefault()) { + if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" { + uniqueAddrs[vip.Address] = struct{}{} + } + } + } + if len(uniqueAddrs) > 2 { + s.Logger.Debug("detected multiple virtual IPs for an upstream, all will be used to match traffic", + "upstream", uid, "ip_count", len(uniqueAddrs)) + } + + // For every potential address we collected, create the appropriate address prefix to match on. + // In this case we are matching on exact addresses, so the prefix is the address itself, + // and the prefix length is based on whether it's IPv4 or IPv6. 
+ filterChain.FilterChainMatch = makeFilterChainMatchFromAddrs(uniqueAddrs) + + // Only attach the filter chain if there are addresses to match on + if filterChain.FilterChainMatch != nil && len(filterChain.FilterChainMatch.PrefixRanges) > 0 { + outboundListener.FilterChains = append(outboundListener.FilterChains, filterChain) + } } diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index 3055b4436..c51730074 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -756,10 +756,6 @@ func TestListenersFromSnapshot(t *testing.T) { name: "ingress-with-sds-service-level-mixed-no-tls", create: proxycfg.TestConfigSnapshotIngressGatewaySDS_MixedNoTLS, }, - { - name: "transparent-proxy", - create: proxycfg.TestConfigSnapshotTransparentProxy, - }, { name: "transparent-proxy-http-upstream", create: proxycfg.TestConfigSnapshotTransparentProxyHTTPUpstream, diff --git a/agent/xds/resources_test.go b/agent/xds/resources_test.go index 983f1bb44..1e3151e07 100644 --- a/agent/xds/resources_test.go +++ b/agent/xds/resources_test.go @@ -144,10 +144,18 @@ func TestAllResourcesFromSnapshot(t *testing.T) { }) }, }, + { + name: "transparent-proxy", + create: proxycfg.TestConfigSnapshotTransparentProxy, + }, { name: "connect-proxy-with-peered-upstreams", create: proxycfg.TestConfigSnapshotPeering, }, + { + name: "transparent-proxy-with-peered-upstreams", + create: proxycfg.TestConfigSnapshotPeeringTProxy, + }, } tests = append(tests, getConnectProxyTransparentProxyGoldenTestCases()...) tests = append(tests, getMeshGatewayPeeringGoldenTestCases()...) diff --git a/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden b/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden new file mode 100644 index 000000000..766a66d12 --- /dev/null +++ b/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden @@ -0,0 +1,207 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "api-a.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "altStatName": "api-a.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICczCCAdwCCQC3BLnEmLCrSjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCQVoxEjAQBgNVBAcMCUZsYWdzdGFmZjEMMAoGA1UECgwDRm9v\nMRAwDgYDVQQLDAdleGFtcGxlMQ8wDQYDVQQDDAZwZWVyLWExHTAbBgkqhkiG9w0B\nCQEWDmZvb0BwZWVyLWEuY29tMB4XDTIyMDUyNjAxMDQ0NFoXDTIzMDUyNjAxMDQ0\nNFowfjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMRIwEAYDVQQHDAlGbGFnc3Rh\nZmYxDDAKBgNVBAoMA0ZvbzEQMA4GA1UECwwHZXhhbXBsZTEPMA0GA1UEAwwGcGVl\nci1hMR0wGwYJKoZIhvcNAQkBFg5mb29AcGVlci1hLmNvbTCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEA2zFYGTbXDAntT5pLTpZ2+VTiqx4J63VRJH1kdu11f0FV\nc2jl1pqCuYDbQXknDU0Pv1Q5y0+nSAihD2KqGS571r+vHQiPtKYPYRqPEe9FzAhR\n2KhWH6v/tk5DG1HqOjV9/zWRKB12gdFNZZqnw/e7NjLNq3wZ2UAwxXip5uJ8uwMC\nAwEAATANBgkqhkiG9w0BAQsFAAOBgQC/CJ9Syf4aL91wZizKTejwouRYoWv4gRAk\nyto45ZcNMHfJ0G2z+XAMl9ZbQsLgXmzAx4IM6y5Jckq8pKC4PEijCjlKTktLHlEy\n0ggmFxtNB1tid2NC8dOzcQ3l45+gDjDqdILhAvLDjlAIebdkqVqb2CfFNW/I2CQH\nZAuKN1aoKA==\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cloud-dc/svc/api-a" + } + ] + } + }, + "sni": "api-a.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "altStatName": "db.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICczCCAdwCCQC3BLnEmLCrSjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCQVoxEjAQBgNVBAcMCUZsYWdzdGFmZjEMMAoGA1UECgwDRm9v\nMRAwDgYDVQQLDAdleGFtcGxlMQ8wDQYDVQQDDAZwZWVyLWExHTAbBgkqhkiG9w0B\nCQEWDmZvb0BwZWVyLWEuY29tMB4XDTIyMDUyNjAxMDQ0NFoXDTIzMDUyNjAxMDQ0\nNFowfjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMRIwEAYDVQQHDAlGbGFnc3Rh\nZmYxDDAKBgNVBAoMA0ZvbzEQMA4GA1UECwwHZXhhbXBsZTEPMA0GA1UEAwwGcGVl\nci1hMR0wGwYJKoZIhvcNAQkBFg5mb29AcGVlci1hLmNvbTCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEA2zFYGTbXDAntT5pLTpZ2+VTiqx4J63VRJH1kdu11f0FV\nc2jl1pqCuYDbQXknDU0Pv1Q5y0+nSAihD2KqGS571r+vHQiPtKYPYRqPEe9FzAhR\n2KhWH6v/tk5DG1HqOjV9/zWRKB12gdFNZZqnw/e7NjLNq3wZ2UAwxXip5uJ8uwMC\nAwEAATANBgkqhkiG9w0BAQsFAAOBgQC/CJ9Syf4aL91wZizKTejwouRYoWv4gRAk\nyto45ZcNMHfJ0G2z+XAMl9ZbQsLgXmzAx4IM6y5Jckq8pKC4PEijCjlKTktLHlEy\n0ggmFxtNB1tid2NC8dOzcQ3l45+gDjDqdILhAvLDjlAIebdkqVqb2CfFNW/I2CQH\nZAuKN1aoKA==\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cloud-dc/svc/db" + } + ] + } + }, + "sni": "db.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "no-endpoints?peer=peer-a", + "altStatName": "no-endpoints?peer=peer-a", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICczCCAdwCCQC3BLnEmLCrSjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCQVoxEjAQBgNVBAcMCUZsYWdzdGFmZjEMMAoGA1UECgwDRm9v\nMRAwDgYDVQQLDAdleGFtcGxlMQ8wDQYDVQQDDAZwZWVyLWExHTAbBgkqhkiG9w0B\nCQEWDmZvb0BwZWVyLWEuY29tMB4XDTIyMDUyNjAxMDQ0NFoXDTIzMDUyNjAxMDQ0\nNFowfjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMRIwEAYDVQQHDAlGbGFnc3Rh\nZmYxDDAKBgNVBAoMA0ZvbzEQMA4GA1UECwwHZXhhbXBsZTEPMA0GA1UEAwwGcGVl\nci1hMR0wGwYJKoZIhvcNAQkBFg5mb29AcGVlci1hLmNvbTCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEA2zFYGTbXDAntT5pLTpZ2+VTiqx4J63VRJH1kdu11f0FV\nc2jl1pqCuYDbQXknDU0Pv1Q5y0+nSAihD2KqGS571r+vHQiPtKYPYRqPEe9FzAhR\n2KhWH6v/tk5DG1HqOjV9/zWRKB12gdFNZZqnw/e7NjLNq3wZ2UAwxXip5uJ8uwMC\nAwEAATANBgkqhkiG9w0BAQsFAAOBgQC/CJ9Syf4aL91wZizKTejwouRYoWv4gRAk\nyto45ZcNMHfJ0G2z+XAMl9ZbQsLgXmzAx4IM6y5Jckq8pKC4PEijCjlKTktLHlEy\n0ggmFxtNB1tid2NC8dOzcQ3l45+gDjDqdILhAvLDjlAIebdkqVqb2CfFNW/I2CQH\nZAuKN1aoKA==\n-----END CERTIFICATE-----\n" + } + } + } + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "original-destination", + "type": "ORIGINAL_DST", + "connectTimeout": "5s", + "lbPolicy": "CLUSTER_PROVIDED" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/transparent-proxy-with-peered-upstreams.latest.golden b/agent/xds/testdata/endpoints/transparent-proxy-with-peered-upstreams.latest.golden new file mode 100644 index 000000000..220919da9 --- /dev/null +++ b/agent/xds/testdata/endpoints/transparent-proxy-with-peered-upstreams.latest.golden @@ -0,0 +1,60 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "api-a.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 0 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": 
"db.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "2.3.4.5", + "portValue": 0 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "no-endpoints?peer=peer-a", + "endpoints": [ + { + + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/transparent-proxy.latest.golden b/agent/xds/testdata/endpoints/transparent-proxy.latest.golden new file mode 100644 index 000000000..fb13b6259 --- /dev/null +++ b/agent/xds/testdata/endpoints/transparent-proxy.latest.golden @@ -0,0 +1,106 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.20.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "google.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "9.9.9.9", + "portValue": 9090 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "no-endpoints.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/transparent-proxy-with-peered-upstreams.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-with-peered-upstreams.latest.golden new file mode 100644 index 000000000..f9001c934 --- /dev/null +++ b/agent/xds/testdata/listeners/transparent-proxy-with-peered-upstreams.latest.golden @@ -0,0 +1,176 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "api-a?peer=peer-a:127.0.0.1:9090", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9090 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": 
"envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api-a?peer=peer-a", + "cluster": "api-a.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "no-endpoints?peer=peer-a:127.0.0.1:1234", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 1234 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.no-endpoints?peer=peer-a", + "cluster": "no-endpoints?peer=peer-a" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "outbound_listener:127.0.0.1:15001", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 15001 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "prefixRanges": [ + { + "addressPrefix": "10.0.0.2", + "prefixLen": 32 + }, + { + "addressPrefix": "240.0.0.2", + "prefixLen": 32 + } + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db?peer=peer-a", + "cluster": "db.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + ] + } + ], + "defaultFilterChain": { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.original-destination", + "cluster": "original-destination" + } + } + ] + }, + "listenerFilters": [ + { + "name": "envoy.filters.listener.original_dst", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/routes/transparent-proxy-with-peered-upstreams.latest.golden b/agent/xds/testdata/routes/transparent-proxy-with-peered-upstreams.latest.golden new file mode 100644 index 000000000..9c050cbe6 --- /dev/null +++ b/agent/xds/testdata/routes/transparent-proxy-with-peered-upstreams.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/routes/transparent-proxy.latest.golden b/agent/xds/testdata/routes/transparent-proxy.latest.golden new file mode 100644 index 000000000..9c050cbe6 --- /dev/null +++ b/agent/xds/testdata/routes/transparent-proxy.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file From a9f17c0f99f2767ea453e6fc7ee6b5dcf178aae8 Mon Sep 17 00:00:00 2001 From: Paul Glass Date: Tue, 19 Jul 2022 15:26:44 -0600 Subject: [PATCH 040/107] Extract AWS auth implementation out of 
Consul (#13760) --- agent/consul/authmethod/awsauth/aws.go | 2 +- agent/consul/authmethod/awsauth/aws_test.go | 4 +- command/login/aws.go | 2 +- command/login/login_test.go | 2 +- go.mod | 3 +- go.sum | 2 + internal/iamauth/README.md | 2 - internal/iamauth/auth.go | 311 ------------ internal/iamauth/auth_test.go | 124 ----- internal/iamauth/config.go | 80 --- internal/iamauth/config_test.go | 150 ------ internal/iamauth/iamauthtest/testing.go | 187 ------- internal/iamauth/responses/arn.go | 94 ---- internal/iamauth/responses/responses.go | 96 ---- internal/iamauth/responses/responses_test.go | 293 ----------- internal/iamauth/responsestest/testing.go | 81 ---- internal/iamauth/token.go | 403 ---------------- internal/iamauth/token_test.go | 483 ------------------- internal/iamauth/util.go | 143 ------ lib/glob.go | 24 - lib/glob_test.go | 37 -- 21 files changed, 9 insertions(+), 2514 deletions(-) delete mode 100644 internal/iamauth/README.md delete mode 100644 internal/iamauth/auth.go delete mode 100644 internal/iamauth/auth_test.go delete mode 100644 internal/iamauth/config.go delete mode 100644 internal/iamauth/config_test.go delete mode 100644 internal/iamauth/iamauthtest/testing.go delete mode 100644 internal/iamauth/responses/arn.go delete mode 100644 internal/iamauth/responses/responses.go delete mode 100644 internal/iamauth/responses/responses_test.go delete mode 100644 internal/iamauth/responsestest/testing.go delete mode 100644 internal/iamauth/token.go delete mode 100644 internal/iamauth/token_test.go delete mode 100644 internal/iamauth/util.go delete mode 100644 lib/glob.go delete mode 100644 lib/glob_test.go diff --git a/agent/consul/authmethod/awsauth/aws.go b/agent/consul/authmethod/awsauth/aws.go index f3995cdc5..7c7758476 100644 --- a/agent/consul/authmethod/awsauth/aws.go +++ b/agent/consul/authmethod/awsauth/aws.go @@ -4,9 +4,9 @@ import ( "context" "fmt" + iamauth "github.com/hashicorp/consul-awsauth" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/iamauth" "github.com/hashicorp/go-hclog" ) diff --git a/agent/consul/authmethod/awsauth/aws_test.go b/agent/consul/authmethod/awsauth/aws_test.go index 3025275cf..031cd035b 100644 --- a/agent/consul/authmethod/awsauth/aws_test.go +++ b/agent/consul/authmethod/awsauth/aws_test.go @@ -8,10 +8,10 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws/credentials" + iamauth "github.com/hashicorp/consul-awsauth" + "github.com/hashicorp/consul-awsauth/iamauthtest" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/iamauth" - "github.com/hashicorp/consul/internal/iamauth/iamauthtest" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" ) diff --git a/command/login/aws.go b/command/login/aws.go index bae90c943..c0d2212dc 100644 --- a/command/login/aws.go +++ b/command/login/aws.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" + iamauth "github.com/hashicorp/consul-awsauth" "github.com/hashicorp/consul/agent/consul/authmethod/awsauth" - "github.com/hashicorp/consul/internal/iamauth" "github.com/hashicorp/go-hclog" ) diff --git a/command/login/login_test.go b/command/login/login_test.go index 7eba6a403..6340d93f7 100644 --- a/command/login/login_test.go +++ b/command/login/login_test.go @@ -13,13 +13,13 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/square/go-jose.v2/jwt" 
+ "github.com/hashicorp/consul-awsauth/iamauthtest" "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/consul/authmethod/kubeauth" "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" - "github.com/hashicorp/consul/internal/iamauth/iamauthtest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" ) diff --git a/go.mod b/go.mod index 84e9d1d18..cb048763d 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,7 @@ require ( github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 + github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 github.com/hashicorp/consul/api v1.13.1 github.com/hashicorp/consul/sdk v0.10.0 @@ -37,7 +38,6 @@ require ( github.com/hashicorp/go-memdb v1.3.2 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-raftchunking v0.6.2 - github.com/hashicorp/go-retryablehttp v0.6.7 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.2 @@ -133,6 +133,7 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-retryablehttp v0.6.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/mdns v1.0.4 // indirect github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 // indirect diff --git a/go.sum b/go.sum index 722d90784..5e859cc7a 100644 --- a/go.sum +++ b/go.sum @@ -294,6 +294,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 h1:1ZEjnveDe20yFa6lSkfdQZm5BR/b271n0MsB5R2L3us= +github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706/go.mod h1:1Cs8FlmD1BfSQXJGcFLSV5FuIx1AbJP+EJGdxosoS2g= github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4 h1:Com/5n/omNSBusX11zdyIYtidiqewLIanchbm//McZA= github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4/go.mod h1:vWEAHAeAqfOwB3pSgHMQpIu8VH1jL+Ltg54Tw0wt/NI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= diff --git a/internal/iamauth/README.md b/internal/iamauth/README.md deleted file mode 100644 index a9880a355..000000000 --- a/internal/iamauth/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is an internal package to house the AWS IAM auth method utilities for potential -future extraction from Consul. 
diff --git a/internal/iamauth/auth.go b/internal/iamauth/auth.go deleted file mode 100644 index aaf6bc657..000000000 --- a/internal/iamauth/auth.go +++ /dev/null @@ -1,311 +0,0 @@ -package iamauth - -import ( - "context" - "encoding/xml" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strings" - "time" - - "github.com/hashicorp/consul/internal/iamauth/responses" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/lib/stringslice" - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" -) - -const ( - // Retry configuration - retryWaitMin = 500 * time.Millisecond - retryWaitMax = 30 * time.Second -) - -type Authenticator struct { - config *Config - logger hclog.Logger -} - -type IdentityDetails struct { - EntityName string - EntityId string - AccountId string - - EntityPath string - EntityTags map[string]string -} - -func NewAuthenticator(config *Config, logger hclog.Logger) (*Authenticator, error) { - if err := config.Validate(); err != nil { - return nil, err - } - return &Authenticator{ - config: config, - logger: logger, - }, nil -} - -// ValidateLogin determines if the identity in the loginToken is permitted to login. -// If so, it returns details about the identity. Otherwise, an error is returned. -func (a *Authenticator) ValidateLogin(ctx context.Context, loginToken string) (*IdentityDetails, error) { - token, err := NewBearerToken(loginToken, a.config) - if err != nil { - return nil, err - } - - req, err := token.GetCallerIdentityRequest() - if err != nil { - return nil, err - } - - if a.config.ServerIDHeaderValue != "" { - err := validateHeaderValue(req.Header, a.config.ServerIDHeaderName, a.config.ServerIDHeaderValue) - if err != nil { - return nil, err - } - } - - callerIdentity, err := a.submitCallerIdentityRequest(ctx, req) - if err != nil { - return nil, err - } - a.logger.Debug("iamauth login attempt", "arn", callerIdentity.Arn) - - entity, err := responses.ParseArn(callerIdentity.Arn) - if err != nil { - return nil, err - } - - identityDetails := &IdentityDetails{ - EntityName: entity.FriendlyName, - // This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID" - // (in the case of an IAM user). - EntityId: strings.Split(callerIdentity.UserId, ":")[0], - AccountId: callerIdentity.Account, - } - clientArn := entity.CanonicalArn() - - // Fetch the IAM Role or IAM User, if configured. - // This requires the token to contain a signed iam:GetRole or iam:GetUser request. - if a.config.EnableIAMEntityDetails { - iamReq, err := token.GetEntityRequest() - if err != nil { - return nil, err - } - - if a.config.ServerIDHeaderValue != "" { - err := validateHeaderValue(iamReq.Header, a.config.ServerIDHeaderName, a.config.ServerIDHeaderValue) - if err != nil { - return nil, err - } - } - - iamEntityDetails, err := a.submitGetIAMEntityRequest(ctx, iamReq, token.entityRequestType) - if err != nil { - return nil, err - } - - // Only the CallerIdentity response is a guarantee of the client's identity. - // The role/user details must have a unique id match to the CallerIdentity before use. 
- if iamEntityDetails.EntityId() != identityDetails.EntityId { - return nil, fmt.Errorf("unique id mismatch in login token") - } - - // Use the full ARN with path from the Role/User details - clientArn = iamEntityDetails.EntityArn() - identityDetails.EntityPath = iamEntityDetails.EntityPath() - identityDetails.EntityTags = iamEntityDetails.EntityTags() - } - - if err := a.validateIdentity(clientArn); err != nil { - return nil, err - } - return identityDetails, nil -} - -// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1321-L1361 -func (a *Authenticator) validateIdentity(clientArn string) error { - if stringslice.Contains(a.config.BoundIAMPrincipalARNs, clientArn) { - // Matches one of BoundIAMPrincipalARNs, so it is trusted - return nil - } - if a.config.EnableIAMEntityDetails { - for _, principalArn := range a.config.BoundIAMPrincipalARNs { - if strings.HasSuffix(principalArn, "*") && lib.GlobbedStringsMatch(principalArn, clientArn) { - // Wildcard match, so it is trusted - return nil - } - } - } - return fmt.Errorf("IAM principal %s is not trusted", clientArn) -} - -func (a *Authenticator) submitCallerIdentityRequest(ctx context.Context, req *http.Request) (*responses.GetCallerIdentityResult, error) { - responseBody, err := a.submitRequest(ctx, req) - if err != nil { - return nil, err - } - callerIdentityResponse, err := parseGetCallerIdentityResponse(responseBody) - if err != nil { - return nil, fmt.Errorf("error parsing STS response") - } - - if n := len(callerIdentityResponse.GetCallerIdentityResult); n != 1 { - return nil, fmt.Errorf("received %d identities in STS response but expected 1", n) - } - return &callerIdentityResponse.GetCallerIdentityResult[0], nil -} - -func (a *Authenticator) submitGetIAMEntityRequest(ctx context.Context, req *http.Request, reqType string) (responses.IAMEntity, error) { - responseBody, err := a.submitRequest(ctx, req) - if err != nil { - return nil, err - } - iamResponse, err := parseGetIAMEntityResponse(responseBody, reqType) - if err != nil { - return nil, fmt.Errorf("error parsing IAM response: %s", err) - } - return iamResponse, nil - -} - -// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1636 -func (a *Authenticator) submitRequest(ctx context.Context, req *http.Request) (string, error) { - retryableReq, err := retryablehttp.FromRequest(req) - if err != nil { - return "", err - } - retryableReq = retryableReq.WithContext(ctx) - client := cleanhttp.DefaultClient() - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - } - retryingClient := &retryablehttp.Client{ - HTTPClient: client, - RetryWaitMin: retryWaitMin, - RetryWaitMax: retryWaitMax, - RetryMax: a.config.MaxRetries, - CheckRetry: retryablehttp.DefaultRetryPolicy, - Backoff: retryablehttp.DefaultBackoff, - } - - response, err := retryingClient.Do(retryableReq) - if err != nil { - return "", fmt.Errorf("error making request: %w", err) - } - if response != nil { - defer response.Body.Close() - } - // Validate that the response type is XML - if ct := response.Header.Get("Content-Type"); ct != "text/xml" { - return "", fmt.Errorf("response body is invalid") - } - - // we check for status code afterwards to also print out response body - responseBody, err := ioutil.ReadAll(response.Body) - if err != nil { - return "", err - } - if response.StatusCode != 200 { - return "", fmt.Errorf("received 
error code %d: %s", response.StatusCode, string(responseBody)) - } - return string(responseBody), nil - -} - -// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1625-L1634 -func parseGetCallerIdentityResponse(response string) (responses.GetCallerIdentityResponse, error) { - result := responses.GetCallerIdentityResponse{} - response = strings.TrimSpace(response) - if !strings.HasPrefix(response, " 2 { - return fmt.Errorf("found multiple SignedHeaders components") - } - signedHeaders := string(matches[1]) - return ensureHeaderIsSigned(signedHeaders, headerName) - } - // NOTE: If we support GET requests, then we need to parse the X-Amz-SignedHeaders - // argument out of the query string and search in there for the header value - return fmt.Errorf("missing Authorization header") -} - -func ensureHeaderIsSigned(signedHeaders, headerToSign string) error { - // Not doing a constant time compare here, the values aren't secret - for _, header := range strings.Split(signedHeaders, ";") { - if header == strings.ToLower(headerToSign) { - return nil - } - } - return fmt.Errorf("header wasn't signed") -} diff --git a/internal/iamauth/auth_test.go b/internal/iamauth/auth_test.go deleted file mode 100644 index 909b64509..000000000 --- a/internal/iamauth/auth_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package iamauth - -import ( - "context" - "encoding/json" - "testing" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/hashicorp/consul/internal/iamauth/iamauthtest" - "github.com/hashicorp/consul/internal/iamauth/responses" - "github.com/hashicorp/consul/internal/iamauth/responsestest" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" -) - -func TestValidateLogin(t *testing.T) { - f := iamauthtest.MakeFixture() - - var ( - serverForRoleMismatchedIds = &iamauthtest.Server{ - GetCallerIdentityResponse: f.ServerForRole.GetCallerIdentityResponse, - GetRoleResponse: responsestest.MakeGetRoleResponse(f.RoleARN, "AAAAsomenonmatchingid", responses.Tags{}), - } - serverForUserMismatchedIds = &iamauthtest.Server{ - GetCallerIdentityResponse: f.ServerForUser.GetCallerIdentityResponse, - GetUserResponse: responsestest.MakeGetUserResponse(f.UserARN, "AAAAsomenonmatchingid", responses.Tags{}), - } - ) - - cases := map[string]struct { - config *Config - server *iamauthtest.Server - expIdent *IdentityDetails - expError string - }{ - "no bound principals": { - expError: "not trusted", - server: f.ServerForRole, - config: &Config{}, - }, - "no matching principal": { - expError: "not trusted", - server: f.ServerForUser, - config: &Config{ - BoundIAMPrincipalARNs: []string{ - "arn:aws:iam::1234567890:user/some-other-role", - "arn:aws:iam::1234567890:user/some-other-user", - }, - }, - }, - "mismatched server id header": { - expError: `expected "some-non-matching-value" but got "server.id.example.com"`, - server: f.ServerForRole, - config: &Config{ - BoundIAMPrincipalARNs: []string{f.CanonicalRoleARN}, - ServerIDHeaderValue: "some-non-matching-value", - ServerIDHeaderName: "X-Test-ServerID", - }, - }, - "role unique id mismatch": { - expError: "unique id mismatch in login token", - // The RoleId in the GetRole response must match the UserId in the GetCallerIdentity response - // during login. If not, the RoleId cannot be used. 
- server: serverForRoleMismatchedIds, - config: &Config{ - BoundIAMPrincipalARNs: []string{f.RoleARN}, - EnableIAMEntityDetails: true, - }, - }, - "user unique id mismatch": { - expError: "unique id mismatch in login token", - server: serverForUserMismatchedIds, - config: &Config{ - BoundIAMPrincipalARNs: []string{f.UserARN}, - EnableIAMEntityDetails: true, - }, - }, - } - logger := hclog.New(nil) - for name, c := range cases { - t.Run(name, func(t *testing.T) { - fakeAws := iamauthtest.NewTestServer(t, c.server) - - c.config.STSEndpoint = fakeAws.URL + "/sts" - c.config.IAMEndpoint = fakeAws.URL + "/iam" - setTestHeaderNames(c.config) - - // This bypasses NewAuthenticator, which bypasses config.Validate(). - auth := &Authenticator{config: c.config, logger: logger} - - loginInput := &LoginInput{ - Creds: credentials.NewStaticCredentials("fake", "fake", ""), - IncludeIAMEntity: c.config.EnableIAMEntityDetails, - STSEndpoint: c.config.STSEndpoint, - STSRegion: "fake-region", - Logger: logger, - ServerIDHeaderValue: "server.id.example.com", - } - setLoginInputHeaderNames(loginInput) - loginData, err := GenerateLoginData(loginInput) - require.NoError(t, err) - loginBytes, err := json.Marshal(loginData) - require.NoError(t, err) - - ident, err := auth.ValidateLogin(context.Background(), string(loginBytes)) - if c.expError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), c.expError) - require.Nil(t, ident) - } else { - require.NoError(t, err) - require.Equal(t, c.expIdent, ident) - } - }) - } -} - -func setLoginInputHeaderNames(in *LoginInput) { - in.ServerIDHeaderName = "X-Test-ServerID" - in.GetEntityMethodHeader = "X-Test-Method" - in.GetEntityURLHeader = "X-Test-URL" - in.GetEntityHeadersHeader = "X-Test-Headers" - in.GetEntityBodyHeader = "X-Test-Body" -} diff --git a/internal/iamauth/config.go b/internal/iamauth/config.go deleted file mode 100644 index d3c722c55..000000000 --- a/internal/iamauth/config.go +++ /dev/null @@ -1,80 +0,0 @@ -package iamauth - -import ( - "fmt" - "strings" - - awsArn "github.com/aws/aws-sdk-go/aws/arn" -) - -type Config struct { - BoundIAMPrincipalARNs []string - EnableIAMEntityDetails bool - IAMEntityTags []string - ServerIDHeaderValue string - MaxRetries int - IAMEndpoint string - STSEndpoint string - AllowedSTSHeaderValues []string - - // Customizable header names - ServerIDHeaderName string - GetEntityMethodHeader string - GetEntityURLHeader string - GetEntityHeadersHeader string - GetEntityBodyHeader string -} - -func (c *Config) Validate() error { - if len(c.BoundIAMPrincipalARNs) == 0 { - return fmt.Errorf("BoundIAMPrincipalARNs is required and must have at least 1 entry") - } - - for _, arn := range c.BoundIAMPrincipalARNs { - if n := strings.Count(arn, "*"); n > 0 { - if !c.EnableIAMEntityDetails { - return fmt.Errorf("Must set EnableIAMEntityDetails=true to use wildcards in BoundIAMPrincipalARNs") - } - if n != 1 || !strings.HasSuffix(arn, "*") { - return fmt.Errorf("Only one wildcard is allowed at the end of the bound IAM principal ARN") - } - } - - if parsed, err := awsArn.Parse(arn); err != nil { - return fmt.Errorf("Invalid principal ARN: %q", arn) - } else if parsed.Service != "iam" && parsed.Service != "sts" { - return fmt.Errorf("Invalid principal ARN: %q", arn) - } - } - - if len(c.IAMEntityTags) > 0 && !c.EnableIAMEntityDetails { - return fmt.Errorf("Must set EnableIAMEntityDetails=true to use IAMUserTags") - } - - // If server id header checking is enabled, we need the header name. 
- if c.ServerIDHeaderValue != "" && c.ServerIDHeaderName == "" { - return fmt.Errorf("Must set ServerIDHeaderName to use a server ID value") - } - - if c.EnableIAMEntityDetails && (c.GetEntityBodyHeader == "" || - c.GetEntityHeadersHeader == "" || - c.GetEntityMethodHeader == "" || - c.GetEntityURLHeader == "") { - return fmt.Errorf("Must set all of GetEntityMethodHeader, GetEntityURLHeader, " + - "GetEntityHeadersHeader, and GetEntityBodyHeader when EnableIAMEntityDetails=true") - } - - if c.STSEndpoint != "" { - if _, err := parseUrl(c.STSEndpoint); err != nil { - return fmt.Errorf("STSEndpoint is invalid: %s", err) - } - } - - if c.IAMEndpoint != "" { - if _, err := parseUrl(c.IAMEndpoint); err != nil { - return fmt.Errorf("IAMEndpoint is invalid: %s", err) - } - } - - return nil -} diff --git a/internal/iamauth/config_test.go b/internal/iamauth/config_test.go deleted file mode 100644 index d23dc992a..000000000 --- a/internal/iamauth/config_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package iamauth - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestConfigValidate(t *testing.T) { - principalArn := "arn:aws:iam::000000000000:role/my-role" - - cases := map[string]struct { - expError string - configs []Config - - includeHeaderNames bool - }{ - "bound iam principals are required": { - expError: "BoundIAMPrincipalARNs is required and must have at least 1 entry", - configs: []Config{ - {BoundIAMPrincipalARNs: nil}, - {BoundIAMPrincipalARNs: []string{}}, - }, - }, - "entity tags require entity details": { - expError: "Must set EnableIAMEntityDetails=true to use IAMUserTags", - configs: []Config{ - { - BoundIAMPrincipalARNs: []string{principalArn}, - EnableIAMEntityDetails: false, - IAMEntityTags: []string{"some-tag"}, - }, - }, - }, - "entity details require all entity header names": { - expError: "Must set all of GetEntityMethodHeader, GetEntityURLHeader, " + - "GetEntityHeadersHeader, and GetEntityBodyHeader when EnableIAMEntityDetails=true", - configs: []Config{ - { - BoundIAMPrincipalARNs: []string{principalArn}, - EnableIAMEntityDetails: true, - }, - { - BoundIAMPrincipalARNs: []string{principalArn}, - EnableIAMEntityDetails: true, - GetEntityBodyHeader: "X-Test-Header", - }, - { - BoundIAMPrincipalARNs: []string{principalArn}, - EnableIAMEntityDetails: true, - GetEntityHeadersHeader: "X-Test-Header", - }, - { - BoundIAMPrincipalARNs: []string{principalArn}, - EnableIAMEntityDetails: true, - GetEntityURLHeader: "X-Test-Header", - }, - { - BoundIAMPrincipalARNs: []string{principalArn}, - EnableIAMEntityDetails: true, - GetEntityMethodHeader: "X-Test-Header", - }, - }, - }, - "wildcard principals require entity details": { - expError: "Must set EnableIAMEntityDetails=true to use wildcards in BoundIAMPrincipalARNs", - configs: []Config{ - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*"}}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/path/*"}}, - }, - }, - "only one wildcard suffix is allowed": { - expError: "Only one wildcard is allowed at the end of the bound IAM principal ARN", - configs: []Config{ - { - BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/**"}, - EnableIAMEntityDetails: true, - }, - { - BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*/*"}, - EnableIAMEntityDetails: true, - }, - { - BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*/path"}, - EnableIAMEntityDetails: true, - }, - { - BoundIAMPrincipalARNs: 
[]string{"arn:aws:iam::000000000000:role/*/path/*"}, - EnableIAMEntityDetails: true, - }, - }, - }, - "invalid principal arns are disallowed": { - expError: fmt.Sprintf("Invalid principal ARN"), - configs: []Config{ - {BoundIAMPrincipalARNs: []string{""}}, - {BoundIAMPrincipalARNs: []string{" "}}, - {BoundIAMPrincipalARNs: []string{"*"}, EnableIAMEntityDetails: true}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam:role/my-role"}}, - }, - }, - "valid principal arns are allowed": { - includeHeaderNames: true, - configs: []Config{ - {BoundIAMPrincipalARNs: []string{"arn:aws:sts::000000000000:assumed-role/my-role/some-session-name"}}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:user/my-user"}}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/my-role"}}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:*"}, EnableIAMEntityDetails: true}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/*"}, EnableIAMEntityDetails: true}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:role/path/*"}, EnableIAMEntityDetails: true}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:user/*"}, EnableIAMEntityDetails: true}, - {BoundIAMPrincipalARNs: []string{"arn:aws:iam::000000000000:user/path/*"}, EnableIAMEntityDetails: true}, - }, - }, - "server id header value requires service id header name": { - expError: "Must set ServerIDHeaderName to use a server ID value", - configs: []Config{ - { - BoundIAMPrincipalARNs: []string{principalArn}, - ServerIDHeaderValue: "consul.test.example.com", - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - for _, conf := range c.configs { - if c.includeHeaderNames { - setTestHeaderNames(&conf) - } - err := conf.Validate() - if c.expError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), c.expError) - } else { - require.NoError(t, err) - } - } - }) - } -} - -func setTestHeaderNames(conf *Config) { - conf.GetEntityMethodHeader = "X-Test-Method" - conf.GetEntityURLHeader = "X-Test-URL" - conf.GetEntityHeadersHeader = "X-Test-Headers" - conf.GetEntityBodyHeader = "X-Test-Body" -} diff --git a/internal/iamauth/iamauthtest/testing.go b/internal/iamauth/iamauthtest/testing.go deleted file mode 100644 index b2e1fb37c..000000000 --- a/internal/iamauth/iamauthtest/testing.go +++ /dev/null @@ -1,187 +0,0 @@ -package iamauthtest - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "net/http/httptest" - "sort" - "strings" - "testing" - - "github.com/hashicorp/consul/internal/iamauth/responses" - "github.com/hashicorp/consul/internal/iamauth/responsestest" -) - -// NewTestServer returns a fake AWS API server for local tests: -// It supports the following paths: -// /sts returns STS API responses -// /iam returns IAM API responses -func NewTestServer(t *testing.T, s *Server) *httptest.Server { - server := httptest.NewUnstartedServer(s) - t.Cleanup(server.Close) - server.Start() - return server -} - -// Server contains configuration for the fake AWS API server. 
-type Server struct { - GetCallerIdentityResponse responses.GetCallerIdentityResponse - GetRoleResponse responses.GetRoleResponse - GetUserResponse responses.GetUserResponse -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - writeError(w, http.StatusBadRequest, r) - return - } - - switch { - case strings.HasPrefix(r.URL.Path, "/sts"): - writeXML(w, s.GetCallerIdentityResponse) - case strings.HasPrefix(r.URL.Path, "/iam"): - if bodyBytes, err := io.ReadAll(r.Body); err == nil { - body := string(bodyBytes) - switch { - case strings.Contains(body, "Action=GetRole"): - writeXML(w, s.GetRoleResponse) - return - case strings.Contains(body, "Action=GetUser"): - writeXML(w, s.GetUserResponse) - return - } - } - writeError(w, http.StatusBadRequest, r) - default: - writeError(w, http.StatusNotFound, r) - } -} - -func writeXML(w http.ResponseWriter, val interface{}) { - str, err := xml.MarshalIndent(val, "", " ") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprint(w, err.Error()) - return - } - w.Header().Add("Content-Type", "text/xml") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, string(str)) -} - -func writeError(w http.ResponseWriter, code int, r *http.Request) { - w.WriteHeader(code) - msg := fmt.Sprintf("%s %s", r.Method, r.URL) - fmt.Fprintf(w, ` - - Fake AWS Server Error: %s - -`, msg) -} - -type Fixture struct { - AssumedRoleARN string - CanonicalRoleARN string - RoleARN string - RoleARNWildcard string - RoleName string - RolePath string - RoleTags map[string]string - - EntityID string - EntityIDWithSession string - AccountID string - - UserARN string - UserARNWildcard string - UserName string - UserPath string - UserTags map[string]string - - ServerForRole *Server - ServerForUser *Server -} - -func MakeFixture() Fixture { - f := Fixture{ - AssumedRoleARN: "arn:aws:sts::1234567890:assumed-role/my-role/some-session", - CanonicalRoleARN: "arn:aws:iam::1234567890:role/my-role", - RoleARN: "arn:aws:iam::1234567890:role/some/path/my-role", - RoleARNWildcard: "arn:aws:iam::1234567890:role/some/path/*", - RoleName: "my-role", - RolePath: "some/path", - RoleTags: map[string]string{ - "service-name": "my-service", - "env": "my-env", - }, - - EntityID: "AAAsomeuniqueid", - EntityIDWithSession: "AAAsomeuniqueid:some-session", - AccountID: "1234567890", - - UserARN: "arn:aws:iam::1234567890:user/my-user", - UserARNWildcard: "arn:aws:iam::1234567890:user/*", - UserName: "my-user", - UserPath: "", - UserTags: map[string]string{"user-group": "my-group"}, - } - - f.ServerForRole = &Server{ - GetCallerIdentityResponse: responsestest.MakeGetCallerIdentityResponse( - f.AssumedRoleARN, f.EntityIDWithSession, f.AccountID, - ), - GetRoleResponse: responsestest.MakeGetRoleResponse( - f.RoleARN, f.EntityID, toTags(f.RoleTags), - ), - } - - f.ServerForUser = &Server{ - GetCallerIdentityResponse: responsestest.MakeGetCallerIdentityResponse( - f.UserARN, f.EntityID, f.AccountID, - ), - GetUserResponse: responsestest.MakeGetUserResponse( - f.UserARN, f.EntityID, toTags(f.UserTags), - ), - } - - return f -} - -func (f *Fixture) RoleTagKeys() []string { return keys(f.RoleTags) } -func (f *Fixture) UserTagKeys() []string { return keys(f.UserTags) } -func (f *Fixture) RoleTagValues() []string { return values(f.RoleTags) } -func (f *Fixture) UserTagValues() []string { return values(f.UserTags) } - -// toTags converts the map to a slice of responses.Tag -func toTags(tags map[string]string) responses.Tags { - members := []responses.TagMember{} - 
for k, v := range tags { - members = append(members, responses.TagMember{ - Key: k, - Value: v, - }) - } - return responses.Tags{Members: members} - -} - -// keys returns the keys in sorted order -func keys(tags map[string]string) []string { - result := []string{} - for k := range tags { - result = append(result, k) - } - sort.Strings(result) - return result -} - -// values returns values in tags, ordered by sorted keys -func values(tags map[string]string) []string { - result := []string{} - for _, k := range keys(tags) { // ensures sorted by key - result = append(result, tags[k]) - } - return result -} diff --git a/internal/iamauth/responses/arn.go b/internal/iamauth/responses/arn.go deleted file mode 100644 index ea5e541d3..000000000 --- a/internal/iamauth/responses/arn.go +++ /dev/null @@ -1,94 +0,0 @@ -package responses - -import ( - "fmt" - "strings" -) - -// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1722-L1744 -type ParsedArn struct { - Partition string - AccountNumber string - Type string - Path string - FriendlyName string - SessionInfo string -} - -// https://github.com/hashicorp/vault/blob/ba533d006f2244103648785ebfe8a9a9763d2b6e/builtin/credential/aws/path_login.go#L1482-L1530 -// However, instance profiles are not support in Consul. -func ParseArn(iamArn string) (*ParsedArn, error) { - // iamArn should look like one of the following: - // 1. arn:aws:iam:::/ - // 2. arn:aws:sts:::assumed-role// - // if we get something like 2, then we want to transform that back to what - // most people would expect, which is arn:aws:iam:::role/ - var entity ParsedArn - fullParts := strings.Split(iamArn, ":") - if len(fullParts) != 6 { - return nil, fmt.Errorf("unrecognized arn: contains %d colon-separated parts, expected 6", len(fullParts)) - } - if fullParts[0] != "arn" { - return nil, fmt.Errorf("unrecognized arn: does not begin with \"arn:\"") - } - // normally aws, but could be aws-cn or aws-us-gov - entity.Partition = fullParts[1] - if entity.Partition == "" { - return nil, fmt.Errorf("unrecognized arn: %q is missing the partition", iamArn) - } - if fullParts[2] != "iam" && fullParts[2] != "sts" { - return nil, fmt.Errorf("unrecognized service: %v, not one of iam or sts", fullParts[2]) - } - // fullParts[3] is the region, which doesn't matter for AWS IAM entities - entity.AccountNumber = fullParts[4] - if entity.AccountNumber == "" { - return nil, fmt.Errorf("unrecognized arn: %q is missing the account number", iamArn) - } - // fullParts[5] would now be something like user/ or assumed-role// - parts := strings.Split(fullParts[5], "/") - if len(parts) < 2 { - return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 2 slash-separated parts", fullParts[5]) - } - entity.Type = parts[0] - entity.Path = strings.Join(parts[1:len(parts)-1], "/") - entity.FriendlyName = parts[len(parts)-1] - // now, entity.FriendlyName should either be or - switch entity.Type { - case "assumed-role": - // Check for three parts for assumed role ARNs - if len(parts) < 3 { - return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 3 slash-separated parts", fullParts[5]) - } - // Assumed roles don't have paths and have a slightly different format - // parts[2] is - entity.Path = "" - entity.FriendlyName = parts[1] - entity.SessionInfo = parts[2] - case "user": - case "role": - // case "instance-profile": - default: - return nil, fmt.Errorf("unrecognized principal type: %q", entity.Type) - } - - if entity.FriendlyName == "" { - 
return nil, fmt.Errorf("unrecognized arn: %q is missing the resource name", iamArn) - } - - return &entity, nil -} - -// CanonicalArn returns the canonical ARN for referring to an IAM entity -func (p *ParsedArn) CanonicalArn() string { - entityType := p.Type - // canonicalize "assumed-role" into "role" - if entityType == "assumed-role" { - entityType = "role" - } - // Annoyingly, the assumed-role entity type doesn't have the Path of the role which was assumed - // So, we "canonicalize" it by just completely dropping the path. The other option would be to - // make an AWS API call to look up the role by FriendlyName, which introduces more complexity to - // code and test, and it also breaks backwards compatibility in an area where we would really want - // it - return fmt.Sprintf("arn:%s:iam::%s:%s/%s", p.Partition, p.AccountNumber, entityType, p.FriendlyName) -} diff --git a/internal/iamauth/responses/responses.go b/internal/iamauth/responses/responses.go deleted file mode 100644 index ed57ca97b..000000000 --- a/internal/iamauth/responses/responses.go +++ /dev/null @@ -1,96 +0,0 @@ -package responses - -import "encoding/xml" - -type GetCallerIdentityResponse struct { - XMLName xml.Name `xml:"GetCallerIdentityResponse"` - GetCallerIdentityResult []GetCallerIdentityResult `xml:"GetCallerIdentityResult"` - ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"` -} - -type GetCallerIdentityResult struct { - Arn string `xml:"Arn"` - UserId string `xml:"UserId"` - Account string `xml:"Account"` -} - -type ResponseMetadata struct { - RequestId string `xml:"RequestId"` -} - -// IAMEntity is an interface for getting details from an IAM Role or User. -type IAMEntity interface { - EntityPath() string - EntityArn() string - EntityName() string - EntityId() string - EntityTags() map[string]string -} - -var _ IAMEntity = (*Role)(nil) -var _ IAMEntity = (*User)(nil) - -type GetRoleResponse struct { - XMLName xml.Name `xml:"GetRoleResponse"` - GetRoleResult []GetRoleResult `xml:"GetRoleResult"` - ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"` -} - -type GetRoleResult struct { - Role Role `xml:"Role"` -} - -type Role struct { - Arn string `xml:"Arn"` - Path string `xml:"Path"` - RoleId string `xml:"RoleId"` - RoleName string `xml:"RoleName"` - Tags Tags `xml:"Tags"` -} - -func (r *Role) EntityPath() string { return r.Path } -func (r *Role) EntityArn() string { return r.Arn } -func (r *Role) EntityName() string { return r.RoleName } -func (r *Role) EntityId() string { return r.RoleId } -func (r *Role) EntityTags() map[string]string { return tagsToMap(r.Tags) } - -type GetUserResponse struct { - XMLName xml.Name `xml:"GetUserResponse"` - GetUserResult []GetUserResult `xml:"GetUserResult"` - ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"` -} - -type GetUserResult struct { - User User `xml:"User"` -} - -type User struct { - Arn string `xml:"Arn"` - Path string `xml:"Path"` - UserId string `xml:"UserId"` - UserName string `xml:"UserName"` - Tags Tags `xml:"Tags"` -} - -func (u *User) EntityPath() string { return u.Path } -func (u *User) EntityArn() string { return u.Arn } -func (u *User) EntityName() string { return u.UserName } -func (u *User) EntityId() string { return u.UserId } -func (u *User) EntityTags() map[string]string { return tagsToMap(u.Tags) } - -type Tags struct { - Members []TagMember `xml:"member"` -} - -type TagMember struct { - Key string `xml:"Key"` - Value string `xml:"Value"` -} - -func tagsToMap(tags Tags) map[string]string { - result := map[string]string{} 
- for _, tag := range tags.Members { - result[tag.Key] = tag.Value - } - return result -} diff --git a/internal/iamauth/responses/responses_test.go b/internal/iamauth/responses/responses_test.go deleted file mode 100644 index a641be45a..000000000 --- a/internal/iamauth/responses/responses_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package responses - -import ( - "encoding/xml" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestParseArn(t *testing.T) { - cases := map[string]struct { - arn string - expArn *ParsedArn - }{ - "assumed-role": { - arn: "arn:aws:sts::000000000000:assumed-role/my-role/session-name", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "assumed-role", - Path: "", - FriendlyName: "my-role", - SessionInfo: "session-name", - }, - }, - "role": { - arn: "arn:aws:iam::000000000000:role/my-role", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "role", - Path: "", - FriendlyName: "my-role", - SessionInfo: "", - }, - }, - "user": { - arn: "arn:aws:iam::000000000000:user/my-user", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "user", - Path: "", - FriendlyName: "my-user", - SessionInfo: "", - }, - }, - "role with path": { - arn: "arn:aws:iam::000000000000:role/path/my-role", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "role", - Path: "path", - FriendlyName: "my-role", - SessionInfo: "", - }, - }, - "role with path 2": { - arn: "arn:aws:iam::000000000000:role/path/to/my-role", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "role", - Path: "path/to", - FriendlyName: "my-role", - SessionInfo: "", - }, - }, - "role with path 3": { - arn: "arn:aws:iam::000000000000:role/some/path/to/my-role", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "role", - Path: "some/path/to", - FriendlyName: "my-role", - SessionInfo: "", - }, - }, - "user with path": { - arn: "arn:aws:iam::000000000000:user/path/my-user", - expArn: &ParsedArn{ - Partition: "aws", - AccountNumber: "000000000000", - Type: "user", - Path: "path", - FriendlyName: "my-user", - SessionInfo: "", - }, - }, - - // Invalid cases - "empty string": {arn: ""}, - "wildcard": {arn: "*"}, - "missing prefix": {arn: ":aws:sts::000000000000:assumed-role/my-role/session-name"}, - "missing partition": {arn: "arn::sts::000000000000:assumed-role/my-role/session-name"}, - "missing service": {arn: "arn:aws:::000000000000:assumed-role/my-role/session-name"}, - "missing separator": {arn: "arn:aws:sts:000000000000:assumed-role/my-role/session-name"}, - "missing account id": {arn: "arn:aws:sts:::assumed-role/my-role/session-name"}, - "missing resource": {arn: "arn:aws:sts::000000000000:"}, - "assumed-role missing parts": {arn: "arn:aws:sts::000000000000:assumed-role/my-role"}, - "role missing parts": {arn: "arn:aws:sts::000000000000:role"}, - "role missing parts 2": {arn: "arn:aws:sts::000000000000:role/"}, - "user missing parts": {arn: "arn:aws:sts::000000000000:user"}, - "user missing parts 2": {arn: "arn:aws:sts::000000000000:user/"}, - "unsupported service": {arn: "arn:aws:ecs:us-east-1:000000000000:task/my-task/00000000000000000000000000000000"}, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - parsed, err := ParseArn(c.arn) - if c.expArn != nil { - require.NoError(t, err) - require.Equal(t, c.expArn, parsed) - } else { - require.Error(t, err) - require.Nil(t, parsed) - } - }) - } -} - -func 
TestCanonicalArn(t *testing.T) { - cases := map[string]struct { - arn string - expArn string - }{ - "assumed-role arn": { - arn: "arn:aws:sts::000000000000:assumed-role/my-role/session-name", - expArn: "arn:aws:iam::000000000000:role/my-role", - }, - "role arn": { - arn: "arn:aws:iam::000000000000:role/my-role", - expArn: "arn:aws:iam::000000000000:role/my-role", - }, - "role arn with path": { - arn: "arn:aws:iam::000000000000:role/path/to/my-role", - expArn: "arn:aws:iam::000000000000:role/my-role", - }, - "user arn": { - arn: "arn:aws:iam::000000000000:user/my-user", - expArn: "arn:aws:iam::000000000000:user/my-user", - }, - "user arn with path": { - arn: "arn:aws:iam::000000000000:user/path/to/my-user", - expArn: "arn:aws:iam::000000000000:user/my-user", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - parsed, err := ParseArn(c.arn) - require.NoError(t, err) - require.Equal(t, c.expArn, parsed.CanonicalArn()) - }) - } -} - -func TestUnmarshalXML(t *testing.T) { - t.Run("user xml", func(t *testing.T) { - var resp GetUserResponse - err := xml.Unmarshal([]byte(rawUserXML), &resp) - require.NoError(t, err) - require.Equal(t, expectedParsedUserXML, resp) - }) - t.Run("role xml", func(t *testing.T) { - var resp GetRoleResponse - err := xml.Unmarshal([]byte(rawRoleXML), &resp) - require.NoError(t, err) - require.Equal(t, expectedParsedRoleXML, resp) - }) -} - -var ( - rawUserXML = ` - - - / - arn:aws:iam::000000000000:user/my-user - my-user - AIDAexampleuserid - 2021-01-01T00:01:02Z - - - some-value - some-tag - - - another-value - another-tag - - - third-value - third-tag - - - - - - 11815b96-cb16-4d33-b2cf-0042fa4db4cd - -` - - expectedParsedUserXML = GetUserResponse{ - XMLName: xml.Name{ - Space: "https://iam.amazonaws.com/doc/2010-05-08/", - Local: "GetUserResponse", - }, - GetUserResult: []GetUserResult{ - { - User: User{ - Arn: "arn:aws:iam::000000000000:user/my-user", - Path: "/", - UserId: "AIDAexampleuserid", - UserName: "my-user", - Tags: Tags{ - Members: []TagMember{ - {Key: "some-tag", Value: "some-value"}, - {Key: "another-tag", Value: "another-value"}, - {Key: "third-tag", Value: "third-value"}, - }, - }, - }, - }, - }, - ResponseMetadata: []ResponseMetadata{ - {RequestId: "11815b96-cb16-4d33-b2cf-0042fa4db4cd"}, - }, - } - - rawRoleXML = ` - - - / - some-json-document-that-we-ignore - 43200 - AROAsomeuniqueid - - 2022-01-01T01:02:03Z - us-east-1 - - my-role - arn:aws:iam::000000000000:role/my-role - 2020-01-01T00:00:01Z - - - some-value - some-key - - - another-value - another-key - - - a-third-value - third-key - - - - - - a9866067-c0e5-4b5e-86ba-429c1151e2fb - -` - - expectedParsedRoleXML = GetRoleResponse{ - XMLName: xml.Name{ - Space: "https://iam.amazonaws.com/doc/2010-05-08/", - Local: "GetRoleResponse", - }, - GetRoleResult: []GetRoleResult{ - { - Role: Role{ - Arn: "arn:aws:iam::000000000000:role/my-role", - Path: "/", - RoleId: "AROAsomeuniqueid", - RoleName: "my-role", - Tags: Tags{ - Members: []TagMember{ - {Key: "some-key", Value: "some-value"}, - {Key: "another-key", Value: "another-value"}, - {Key: "third-key", Value: "a-third-value"}, - }, - }, - }, - }, - }, - ResponseMetadata: []ResponseMetadata{ - {RequestId: "a9866067-c0e5-4b5e-86ba-429c1151e2fb"}, - }, - } -) diff --git a/internal/iamauth/responsestest/testing.go b/internal/iamauth/responsestest/testing.go deleted file mode 100644 index 7daec0517..000000000 --- a/internal/iamauth/responsestest/testing.go +++ /dev/null @@ -1,81 +0,0 @@ -package responsestest - -import ( - 
"strings" - - "github.com/hashicorp/consul/internal/iamauth/responses" -) - -func MakeGetCallerIdentityResponse(arn, userId, accountId string) responses.GetCallerIdentityResponse { - // Sanity check the UserId for unit tests. - parsed := parseArn(arn) - switch parsed.Type { - case "assumed-role": - if !strings.Contains(userId, ":") { - panic("UserId for assumed-role in GetCallerIdentity response must be ':'") - } - default: - if strings.Contains(userId, ":") { - panic("UserId in GetCallerIdentity must not contain ':'") - } - } - - return responses.GetCallerIdentityResponse{ - GetCallerIdentityResult: []responses.GetCallerIdentityResult{ - { - Arn: arn, - UserId: userId, - Account: accountId, - }, - }, - } -} - -func MakeGetRoleResponse(arn, id string, tags responses.Tags) responses.GetRoleResponse { - if strings.Contains(id, ":") { - panic("RoleId in GetRole response must not contain ':'") - } - parsed := parseArn(arn) - return responses.GetRoleResponse{ - GetRoleResult: []responses.GetRoleResult{ - { - Role: responses.Role{ - Arn: arn, - Path: parsed.Path, - RoleId: id, - RoleName: parsed.FriendlyName, - Tags: tags, - }, - }, - }, - } -} - -func MakeGetUserResponse(arn, id string, tags responses.Tags) responses.GetUserResponse { - if strings.Contains(id, ":") { - panic("UserId in GetUser resposne must not contain ':'") - } - parsed := parseArn(arn) - return responses.GetUserResponse{ - GetUserResult: []responses.GetUserResult{ - { - User: responses.User{ - Arn: arn, - Path: parsed.Path, - UserId: id, - UserName: parsed.FriendlyName, - Tags: tags, - }, - }, - }, - } -} - -func parseArn(arn string) *responses.ParsedArn { - parsed, err := responses.ParseArn(arn) - if err != nil { - // For testing, just fail immediately. - panic(err) - } - return parsed -} diff --git a/internal/iamauth/token.go b/internal/iamauth/token.go deleted file mode 100644 index 10422ca6c..000000000 --- a/internal/iamauth/token.go +++ /dev/null @@ -1,403 +0,0 @@ -package iamauth - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "net/textproto" - "net/url" - "strings" - - "github.com/hashicorp/consul/lib/stringslice" -) - -const ( - amzHeaderPrefix = "X-Amz-" -) - -var defaultAllowedSTSRequestHeaders = []string{ - "X-Amz-Algorithm", - "X-Amz-Content-Sha256", - "X-Amz-Credential", - "X-Amz-Date", - "X-Amz-Security-Token", - "X-Amz-Signature", - "X-Amz-SignedHeaders", -} - -// BearerToken is a login "token" for an IAM auth method. It is a signed -// sts:GetCallerIdentity request in JSON format. Optionally, it can include a -// signed embedded iam:GetRole or iam:GetUser request in the headers. 
-type BearerToken struct { - config *Config - - getCallerIdentityMethod string - getCallerIdentityURL string - getCallerIdentityHeader http.Header - getCallerIdentityBody string - - getIAMEntityMethod string - getIAMEntityURL string - getIAMEntityHeader http.Header - getIAMEntityBody string - - entityRequestType string - parsedCallerIdentityURL *url.URL - parsedIAMEntityURL *url.URL -} - -var _ json.Unmarshaler = (*BearerToken)(nil) - -func NewBearerToken(loginToken string, config *Config) (*BearerToken, error) { - token := &BearerToken{config: config} - if err := json.Unmarshal([]byte(loginToken), &token); err != nil { - return nil, fmt.Errorf("invalid token: %s", err) - } - - if err := token.validate(); err != nil { - return nil, err - } - - if config.EnableIAMEntityDetails { - method, err := token.getHeader(token.config.GetEntityMethodHeader) - if err != nil { - return nil, err - } - - rawUrl, err := token.getHeader(token.config.GetEntityURLHeader) - if err != nil { - return nil, err - } - - headerJson, err := token.getHeader(token.config.GetEntityHeadersHeader) - if err != nil { - return nil, err - } - - var header http.Header - if err := json.Unmarshal([]byte(headerJson), &header); err != nil { - return nil, err - } - - body, err := token.getHeader(token.config.GetEntityBodyHeader) - if err != nil { - return nil, err - } - - parsedUrl, err := parseUrl(rawUrl) - if err != nil { - return nil, err - } - - token.getIAMEntityMethod = method - token.getIAMEntityBody = body - token.getIAMEntityURL = rawUrl - token.getIAMEntityHeader = header - token.parsedIAMEntityURL = parsedUrl - - if err := token.validateIAMHostname(); err != nil { - return nil, err - } - - reqType, err := token.validateIAMEntityBody() - if err != nil { - return nil, err - } - token.entityRequestType = reqType - } - return token, nil -} - -// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1178 -func (t *BearerToken) validate() error { - if t.getCallerIdentityMethod != "POST" { - return fmt.Errorf("iam_http_request_method must be POST") - } - if err := t.validateSTSHostname(); err != nil { - return err - } - if err := t.validateGetCallerIdentityBody(); err != nil { - return err - } - if err := t.validateAllowedSTSHeaderValues(); err != nil { - return err - } - return nil -} - -// validateSTSHostname checks the CallerIdentityURL in the BearerToken -// either matches the admin configured STSEndpoint or, if STSEndpoint is not set, -// that the URL matches a known Amazon AWS hostname for the STS service, one of: -// -// sts.amazonaws.com -// sts.*.amazonaws.com -// sts-fips.amazonaws.com -// sts-fips.*.amazonaws.com -// -// See https://docs.aws.amazon.com/general/latest/gr/sts.html -func (t *BearerToken) validateSTSHostname() error { - if t.config.STSEndpoint != "" { - // If an STS endpoint is configured, we (elsewhere) send the request to that endpoint. - return nil - } - if t.parsedCallerIdentityURL == nil { - return fmt.Errorf("invalid GetCallerIdentity URL: %v", t.getCallerIdentityURL) - } - - // Otherwise, validate the hostname looks like a known STS endpoint. 
- host := t.parsedCallerIdentityURL.Hostname() - if strings.HasSuffix(host, ".amazonaws.com") && - (strings.HasPrefix(host, "sts.") || strings.HasPrefix(host, "sts-fips.")) { - return nil - } - return fmt.Errorf("invalid STS hostname: %q", host) -} - -// validateIAMHostname checks the IAMEntityURL in the BearerToken -// either matches the admin configured IAMEndpoint or, if IAMEndpoint is not set, -// that the URL matches a known Amazon AWS hostname for the IAM service, one of: -// -// iam.amazonaws.com -// iam.*.amazonaws.com -// iam-fips.amazonaws.com -// iam-fips.*.amazonaws.com -// -// See https://docs.aws.amazon.com/general/latest/gr/iam-service.html -func (t *BearerToken) validateIAMHostname() error { - if t.config.IAMEndpoint != "" { - // If an IAM endpoint is configured, we (elsewhere) send the request to that endpoint. - return nil - } - if t.parsedIAMEntityURL == nil { - return fmt.Errorf("invalid IAM URL: %v", t.getIAMEntityURL) - } - - // Otherwise, validate the hostname looks like a known IAM endpoint. - host := t.parsedIAMEntityURL.Hostname() - if strings.HasSuffix(host, ".amazonaws.com") && - (strings.HasPrefix(host, "iam.") || strings.HasPrefix(host, "iam-fips.")) { - return nil - } - return fmt.Errorf("invalid IAM hostname: %q", host) -} - -// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1439 -func (t *BearerToken) validateGetCallerIdentityBody() error { - allowedValues := url.Values{ - "Action": []string{"GetCallerIdentity"}, - // Will assume for now that future versions don't change - // the semantics - "Version": nil, // any value is allowed - } - if _, err := parseRequestBody(t.getCallerIdentityBody, allowedValues); err != nil { - return fmt.Errorf("iam_request_body error: %s", err) - } - - return nil -} - -func (t *BearerToken) validateIAMEntityBody() (string, error) { - allowedValues := url.Values{ - "Action": []string{"GetRole", "GetUser"}, - "RoleName": nil, // any value is allowed - "UserName": nil, - "Version": nil, - } - body, err := parseRequestBody(t.getIAMEntityBody, allowedValues) - if err != nil { - return "", fmt.Errorf("iam_request_headers[%s] error: %s", t.config.GetEntityBodyHeader, err) - } - - // Disallow GetRole+UserName and GetUser+RoleName. - action := body["Action"][0] - _, hasRoleName := body["RoleName"] - _, hasUserName := body["UserName"] - if action == "GetUser" && hasUserName && !hasRoleName { - return action, nil - } else if action == "GetRole" && hasRoleName && !hasUserName { - return action, nil - } - return "", fmt.Errorf("iam_request_headers[%q] error: invalid request body %q", t.config.GetEntityBodyHeader, t.getIAMEntityBody) -} - -// parseRequestBody parses the AWS STS or IAM request body, such as 'Action=GetRole&RoleName=my-role'. -// It returns the parsed values, or an error if there are unexpected fields based on allowedValues. -// -// A key-value pair in the body is allowed if: -// - It is a single value (i.e. no bodies like 'Action=1&Action=2') -// - allowedValues[key] is an empty slice or nil (any value is allowed for the key) -// - allowedValues[key] is non-empty and contains the exact value -// This always requires an 'Action' field is present and non-empty. -func parseRequestBody(body string, allowedValues url.Values) (url.Values, error) { - qs, err := url.ParseQuery(body) - if err != nil { - return nil, err - } - - // Action field is always required. 
- if _, ok := qs["Action"]; !ok || len(qs["Action"]) == 0 || qs["Action"][0] == "" { - return nil, fmt.Errorf(`missing field "Action"`) - } - - // Ensure the body does not have extra fields and each - // field in the body matches the allowed values. - for k, v := range qs { - exp, ok := allowedValues[k] - if k != "Action" && !ok { - return nil, fmt.Errorf("unexpected field %q", k) - } - - if len(exp) == 0 { - // empty indicates any value is okay - continue - } else if len(v) != 1 || !stringslice.Contains(exp, v[0]) { - return nil, fmt.Errorf("unexpected value %s=%v", k, v) - } - } - - return qs, nil -} - -// https://github.com/hashicorp/vault/blob/861454e0ed1390d67ddaf1a53c1798e5e291728c/builtin/credential/aws/path_config_client.go#L349 -func (t *BearerToken) validateAllowedSTSHeaderValues() error { - for k := range t.getCallerIdentityHeader { - h := textproto.CanonicalMIMEHeaderKey(k) - if strings.HasPrefix(h, amzHeaderPrefix) && - !stringslice.Contains(defaultAllowedSTSRequestHeaders, h) && - !stringslice.Contains(t.config.AllowedSTSHeaderValues, h) { - return fmt.Errorf("invalid request header: %s", h) - } - } - return nil -} - -// UnmarshalJSON unmarshals the bearer token details which contains an HTTP -// request (a signed sts:GetCallerIdentity request). -func (t *BearerToken) UnmarshalJSON(data []byte) error { - var rawData struct { - Method string `json:"iam_http_request_method"` - UrlBase64 string `json:"iam_request_url"` - HeadersBase64 string `json:"iam_request_headers"` - BodyBase64 string `json:"iam_request_body"` - } - - if err := json.Unmarshal(data, &rawData); err != nil { - return err - } - - rawUrl, err := base64.StdEncoding.DecodeString(rawData.UrlBase64) - if err != nil { - return err - } - - headersJson, err := base64.StdEncoding.DecodeString(rawData.HeadersBase64) - if err != nil { - return err - } - - var headers http.Header - // This is a JSON-string in JSON - if err := json.Unmarshal(headersJson, &headers); err != nil { - return err - } - - body, err := base64.StdEncoding.DecodeString(rawData.BodyBase64) - if err != nil { - return err - } - - t.getCallerIdentityMethod = rawData.Method - t.getCallerIdentityBody = string(body) - t.getCallerIdentityHeader = headers - t.getCallerIdentityURL = string(rawUrl) - - parsedUrl, err := parseUrl(t.getCallerIdentityURL) - if err != nil { - return err - } - t.parsedCallerIdentityURL = parsedUrl - return nil -} - -func parseUrl(s string) (*url.URL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - // url.Parse doesn't error on empty string - if u == nil || u.Scheme == "" || u.Host == "" { - return nil, fmt.Errorf("url is invalid: %q", s) - } - return u, nil -} - -// GetCallerIdentityRequest returns the sts:GetCallerIdentity request decoded -// from the bearer token. -func (t *BearerToken) GetCallerIdentityRequest() (*http.Request, error) { - // NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy - // We validate up-front that t.getCallerIdentityURL is a known AWS STS hostname. - // Otherwise, we send to the admin-configured STSEndpoint. 
- endpoint := t.getCallerIdentityURL - if t.config.STSEndpoint != "" { - endpoint = t.config.STSEndpoint - } - - return buildHttpRequest( - t.getCallerIdentityMethod, - endpoint, - t.parsedCallerIdentityURL, - t.getCallerIdentityBody, - t.getCallerIdentityHeader, - ) -} - -// GetEntityRequest returns the iam:GetUser or iam:GetRole request from the request details, -// if present, embedded in the headers of the sts:GetCallerIdentity request. -func (t *BearerToken) GetEntityRequest() (*http.Request, error) { - endpoint := t.getIAMEntityURL - if t.config.IAMEndpoint != "" { - endpoint = t.config.IAMEndpoint - } - - return buildHttpRequest( - t.getIAMEntityMethod, - endpoint, - t.parsedIAMEntityURL, - t.getIAMEntityBody, - t.getIAMEntityHeader, - ) -} - -// getHeader returns the header from s.GetCallerIdentityHeader, or an error if -// the header is not found or is not a single value. -func (t *BearerToken) getHeader(name string) (string, error) { - values := t.getCallerIdentityHeader.Values(name) - if len(values) == 0 { - return "", fmt.Errorf("missing header %q", name) - } - if len(values) != 1 { - return "", fmt.Errorf("invalid value for header %q (expected 1 item)", name) - } - return values[0], nil -} - -// buildHttpRequest returns an HTTP request from the given details. -// This supports sending to a custom endpoint, but always preserves the -// Host header and URI path, which are signed and cannot be modified. -// There's a deeper explanation of this in the Vault source code. -// https://github.com/hashicorp/vault/blob/b17e3256dde937a6248c9a2fa56206aac93d07de/builtin/credential/aws/path_login.go#L1569 -func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*http.Request, error) { - targetUrl := fmt.Sprintf("%s%s", endpoint, parsedUrl.RequestURI()) - request, err := http.NewRequest(method, targetUrl, strings.NewReader(body)) - if err != nil { - return nil, err - } - request.Host = parsedUrl.Host - for k, vals := range headers { - for _, val := range vals { - request.Header.Add(k, val) - } - } - return request, nil -} diff --git a/internal/iamauth/token_test.go b/internal/iamauth/token_test.go deleted file mode 100644 index 42f81151d..000000000 --- a/internal/iamauth/token_test.go +++ /dev/null @@ -1,483 +0,0 @@ -package iamauth - -import ( - "net/http" - "net/url" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNewBearerToken(t *testing.T) { - cases := map[string]struct { - tokenStr string - config Config - expToken BearerToken - expError string - }{ - "valid token": { - tokenStr: validBearerTokenJson, - expToken: validBearerTokenParsed, - }, - "valid token with role": { - tokenStr: validBearerTokenWithRoleJson, - config: Config{ - EnableIAMEntityDetails: true, - GetEntityMethodHeader: "X-Consul-IAM-GetEntity-Method", - GetEntityURLHeader: "X-Consul-IAM-GetEntity-URL", - GetEntityHeadersHeader: "X-Consul-IAM-GetEntity-Headers", - GetEntityBodyHeader: "X-Consul-IAM-GetEntity-Body", - STSEndpoint: validBearerTokenParsed.getCallerIdentityURL, - }, - expToken: validBearerTokenWithRoleParsed, - }, - - "empty json": { - tokenStr: `{}`, - expError: "unexpected end of JSON input", - }, - "missing iam_request_method field": { - tokenStr: tokenJsonMissingMethodField, - expError: "iam_http_request_method must be POST", - }, - "missing iam_request_url field": { - tokenStr: tokenJsonMissingUrlField, - expError: "url is invalid", - }, - "missing iam_request_headers field": { - tokenStr: tokenJsonMissingHeadersField, - expError: 
"unexpected end of JSON input", - }, - "missing iam_request_body field": { - tokenStr: tokenJsonMissingBodyField, - expError: "iam_request_body error", - }, - "invalid json": { - tokenStr: `{`, - expError: "unexpected end of JSON input", - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - token, err := NewBearerToken(c.tokenStr, &c.config) - t.Logf("token = %+v", token) - if c.expError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), c.expError) - require.Nil(t, token) - } else { - require.NoError(t, err) - c.expToken.config = &c.config - require.Equal(t, &c.expToken, token) - } - }) - } -} - -func TestParseRequestBody(t *testing.T) { - cases := map[string]struct { - body string - allowedValues url.Values - expValues url.Values - expError string - }{ - "one allowed field": { - body: "Action=GetCallerIdentity&Version=1234", - allowedValues: url.Values{"Version": []string{"1234"}}, - expValues: url.Values{ - "Action": []string{"GetCallerIdentity"}, - "Version": []string{"1234"}, - }, - }, - "many allowed fields": { - body: "Action=GetRole&RoleName=my-role&Version=1234", - allowedValues: url.Values{ - "Action": []string{"GetUser", "GetRole"}, - "UserName": nil, - "RoleName": nil, - "Version": nil, - }, - expValues: url.Values{ - "Action": []string{"GetRole"}, - "RoleName": []string{"my-role"}, - "Version": []string{"1234"}, - }, - }, - "action only": { - body: "Action=GetRole", - allowedValues: nil, - expValues: url.Values{"Action": []string{"GetRole"}}, - }, - - "empty body": { - expValues: url.Values{}, - expError: `missing field "Action"`, - }, - "disallowed field": { - body: "Action=GetRole&Version=1234&Extra=Abc", - allowedValues: url.Values{"Action": nil, "Version": nil}, - expError: `unexpected field "Extra"`, - }, - "mismatched action": { - body: "Action=GetRole", - allowedValues: url.Values{"Action": []string{"GetUser"}}, - expError: `unexpected value Action=[GetRole]`, - }, - "mismatched field": { - body: "Action=GetRole&Extra=1234", - allowedValues: url.Values{"Action": nil, "Extra": []string{"abc"}}, - expError: `unexpected value Extra=[1234]`, - }, - "multi-valued field": { - body: "Action=GetRole&Action=GetUser", - allowedValues: url.Values{"Action": []string{"GetRole", "GetUser"}}, - // only one value is allowed. 
- expError: `unexpected value Action=[GetRole GetUser]`, - }, - "empty action": { - body: "Action=", - allowedValues: nil, - expError: `missing field "Action"`, - }, - "missing action": { - body: "Version=1234", - allowedValues: url.Values{"Action": []string{"GetRole"}}, - expError: `missing field "Action"`, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - values, err := parseRequestBody(c.body, c.allowedValues) - if c.expError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), c.expError) - require.Nil(t, values) - } else { - require.NoError(t, err) - require.Equal(t, c.expValues, values) - } - }) - } -} - -func TestValidateGetCallerIdentityBody(t *testing.T) { - cases := map[string]struct { - body string - expError string - }{ - "valid": {"Action=GetCallerIdentity&Version=1234", ""}, - "valid 2": {"Action=GetCallerIdentity", ""}, - "empty action": { - "Action=", - `iam_request_body error: missing field "Action"`, - }, - "invalid action": { - "Action=GetRole", - `iam_request_body error: unexpected value Action=[GetRole]`, - }, - "missing action": { - "Version=1234", - `iam_request_body error: missing field "Action"`, - }, - "empty": { - "", - `iam_request_body error: missing field "Action"`, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - token := &BearerToken{getCallerIdentityBody: c.body} - err := token.validateGetCallerIdentityBody() - if c.expError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), c.expError) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestValidateIAMEntityBody(t *testing.T) { - cases := map[string]struct { - body string - expReqType string - expError string - }{ - "valid role": { - body: "Action=GetRole&RoleName=my-role&Version=1234", - expReqType: "GetRole", - }, - "valid role without version": { - body: "Action=GetRole&RoleName=my-role", - expReqType: "GetRole", - }, - "valid user": { - body: "Action=GetUser&UserName=my-role&Version=1234", - expReqType: "GetUser", - }, - "valid user without version": { - body: "Action=GetUser&UserName=my-role", - expReqType: "GetUser", - }, - - "invalid action": { - body: "Action=GetCallerIdentity", - expError: `unexpected value Action=[GetCallerIdentity]`, - }, - "role missing action": { - body: "RoleName=my-role&Version=1234", - expError: `missing field "Action"`, - }, - "user missing action": { - body: "UserName=my-role&Version=1234", - expError: `missing field "Action"`, - }, - "empty": { - body: "", - expError: `missing field "Action"`, - }, - "empty action": { - body: "Action=", - expError: `missing field "Action"`, - }, - "role with user name": { - body: "Action=GetRole&UserName=my-role&Version=1234", - expError: `invalid request body`, - }, - "user with role name": { - body: "Action=GetUser&RoleName=my-role&Version=1234", - expError: `invalid request body`, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - token := &BearerToken{ - config: &Config{}, - getIAMEntityBody: c.body, - } - reqType, err := token.validateIAMEntityBody() - if c.expError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), c.expError) - require.Equal(t, "", reqType) - } else { - require.NoError(t, err) - require.Equal(t, c.expReqType, reqType) - } - }) - } -} - -func TestValidateSTSHostname(t *testing.T) { - cases := []struct { - url string - ok bool - }{ - // https://docs.aws.amazon.com/general/latest/gr/sts.html - {"sts.us-east-2.amazonaws.com", true}, - {"sts-fips.us-east-2.amazonaws.com", true}, - 
{"sts.us-east-1.amazonaws.com", true}, - {"sts-fips.us-east-1.amazonaws.com", true}, - {"sts.us-west-1.amazonaws.com", true}, - {"sts-fips.us-west-1.amazonaws.com", true}, - {"sts.us-west-2.amazonaws.com", true}, - {"sts-fips.us-west-2.amazonaws.com", true}, - {"sts.af-south-1.amazonaws.com", true}, - {"sts.ap-east-1.amazonaws.com", true}, - {"sts.ap-southeast-3.amazonaws.com", true}, - {"sts.ap-south-1.amazonaws.com", true}, - {"sts.ap-northeast-3.amazonaws.com", true}, - {"sts.ap-northeast-2.amazonaws.com", true}, - {"sts.ap-southeast-1.amazonaws.com", true}, - {"sts.ap-southeast-2.amazonaws.com", true}, - {"sts.ap-northeast-1.amazonaws.com", true}, - {"sts.ca-central-1.amazonaws.com", true}, - {"sts.eu-central-1.amazonaws.com", true}, - {"sts.eu-west-1.amazonaws.com", true}, - {"sts.eu-west-2.amazonaws.com", true}, - {"sts.eu-south-1.amazonaws.com", true}, - {"sts.eu-west-3.amazonaws.com", true}, - {"sts.eu-north-1.amazonaws.com", true}, - {"sts.me-south-1.amazonaws.com", true}, - {"sts.sa-east-1.amazonaws.com", true}, - {"sts.us-gov-east-1.amazonaws.com", true}, - {"sts.us-gov-west-1.amazonaws.com", true}, - - // prefix must be either 'sts.' or 'sts-fips.' - {".amazonaws.com", false}, - {"iam.amazonaws.com", false}, - {"other.amazonaws.com", false}, - // suffix must be '.amazonaws.com' and not some other domain - {"stsamazonaws.com", false}, - {"sts-fipsamazonaws.com", false}, - {"sts.stsamazonaws.com", false}, - {"sts.notamazonaws.com", false}, - {"sts-fips.stsamazonaws.com", false}, - {"sts-fips.notamazonaws.com", false}, - {"sts.amazonaws.com.spoof", false}, - {"sts.amazonaws.spoof.com", false}, - {"xyz.sts.amazonaws.com", false}, - } - for _, c := range cases { - t.Run(c.url, func(t *testing.T) { - url := "https://" + c.url - parsedUrl, err := parseUrl(url) - require.NoError(t, err) - - token := &BearerToken{ - config: &Config{}, - getCallerIdentityURL: url, - parsedCallerIdentityURL: parsedUrl, - } - err = token.validateSTSHostname() - if c.ok { - require.NoError(t, err) - } else { - require.Error(t, err) - } - }) - } -} - -func TestValidateIAMHostname(t *testing.T) { - cases := []struct { - url string - ok bool - }{ - // https://docs.aws.amazon.com/general/latest/gr/iam-service.html - {"iam.amazonaws.com", true}, - {"iam-fips.amazonaws.com", true}, - {"iam.us-gov.amazonaws.com", true}, - {"iam-fips.us-gov.amazonaws.com", true}, - - // prefix must be either 'iam.' or 'aim-fips.' 
- {".amazonaws.com", false}, - {"sts.amazonaws.com", false}, - {"other.amazonaws.com", false}, - // suffix must be '.amazonaws.com' and not some other domain - {"iamamazonaws.com", false}, - {"iam-fipsamazonaws.com", false}, - {"iam.iamamazonaws.com", false}, - {"iam.notamazonaws.com", false}, - {"iam-fips.iamamazonaws.com", false}, - {"iam-fips.notamazonaws.com", false}, - {"iam.amazonaws.com.spoof", false}, - {"iam.amazonaws.spoof.com", false}, - {"xyz.iam.amazonaws.com", false}, - } - for _, c := range cases { - t.Run(c.url, func(t *testing.T) { - url := "https://" + c.url - parsedUrl, err := parseUrl(url) - require.NoError(t, err) - - token := &BearerToken{ - config: &Config{}, - getCallerIdentityURL: url, - parsedIAMEntityURL: parsedUrl, - } - err = token.validateIAMHostname() - if c.ok { - require.NoError(t, err) - } else { - require.Error(t, err) - } - }) - } -} - -var ( - validBearerTokenJson = `{ - "iam_http_request_method":"POST", - "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", - "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==", - "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" -}` - - validBearerTokenParsed = BearerToken{ - getCallerIdentityMethod: "POST", - getCallerIdentityURL: "https://sts.amazonaws.com/", - getCallerIdentityHeader: http.Header{ - "Authorization": []string{"AWS4-HMAC-SHA256 Credential=fake/20220322/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-security-token, Signature=efc320b972d07b38b65eb24256805e03149da586d804f8c6364ce98debe080b1"}, - "Content-Length": []string{"43"}, - "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, - "User-Agent": []string{"aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"}, - "X-Amz-Date": []string{"20220322T211103Z"}, - "X-Amz-Security-Token": []string{"fake"}, - }, - getCallerIdentityBody: "Action=GetCallerIdentity&Version=2011-06-15", - parsedCallerIdentityURL: &url.URL{ - Scheme: "https", - Host: "sts.amazonaws.com", - Path: "/", - }, - } - - validBearerTokenWithRoleJson = 
`{"iam_http_request_method":"POST","iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==","iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLWtleS1pZC8yMDIyMDMyMi9mYWtlLXJlZ2lvbi9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1jb25zdWwtaWFtLWdldGVudGl0eS1ib2R5O3gtY29uc3VsLWlhbS1nZXRlbnRpdHktaGVhZGVyczt4LWNvbnN1bC1pYW0tZ2V0ZW50aXR5LW1ldGhvZDt4LWNvbnN1bC1pYW0tZ2V0ZW50aXR5LXVybCwgU2lnbmF0dXJlPTU2MWFjMzFiNWFkMDFjMTI0YzU0YzE2OGY3NmVhNmJmZDY0NWI4ZWM1MzQ1ZjgzNTc3MjljOWFhMGI0NzEzMzciXSwiQ29udGVudC1MZW5ndGgiOlsiNDMiXSwiQ29udGVudC1UeXBlIjpbImFwcGxpY2F0aW9uL3gtd3d3LWZvcm0tdXJsZW5jb2RlZDsgY2hhcnNldD11dGYtOCJdLCJVc2VyLUFnZW50IjpbImF3cy1zZGstZ28vMS40Mi4zNCAoZ28xLjE3LjU7IGRhcndpbjsgYW1kNjQpIl0sIlgtQW16LURhdGUiOlsiMjAyMjAzMjJUMjI1NzQyWiJdLCJYLUNvbnN1bC1JYW0tR2V0ZW50aXR5LUJvZHkiOlsiQWN0aW9uPUdldFJvbGVcdTAwMjZSb2xlTmFtZT1teS1yb2xlXHUwMDI2VmVyc2lvbj0yMDEwLTA1LTA4Il0sIlgtQ29uc3VsLUlhbS1HZXRlbnRpdHktSGVhZGVycyI6WyJ7XCJBdXRob3JpemF0aW9uXCI6W1wiQVdTNC1ITUFDLVNIQTI1NiBDcmVkZW50aWFsPWZha2Uta2V5LWlkLzIwMjIwMzIyL3VzLWVhc3QtMS9pYW0vYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGUsIFNpZ25hdHVyZT1hYTJhMTlkMGEzMDVkNzRiYmQwMDk3NzZiY2E4ODBlNTNjZmE5OTFlNDgzZTQwMzk0NzE4MWE0MWNjNDgyOTQwXCJdLFwiQ29udGVudC1MZW5ndGhcIjpbXCI1MFwiXSxcIkNvbnRlbnQtVHlwZVwiOltcImFwcGxpY2F0aW9uL3gtd3d3LWZvcm0tdXJsZW5jb2RlZDsgY2hhcnNldD11dGYtOFwiXSxcIlVzZXItQWdlbnRcIjpbXCJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KVwiXSxcIlgtQW16LURhdGVcIjpbXCIyMDIyMDMyMlQyMjU3NDJaXCJdfSJdLCJYLUNvbnN1bC1JYW0tR2V0ZW50aXR5LU1ldGhvZCI6WyJQT1NUIl0sIlgtQ29uc3VsLUlhbS1HZXRlbnRpdHktVXJsIjpbImh0dHBzOi8vaWFtLmFtYXpvbmF3cy5jb20vIl19","iam_request_url":"aHR0cDovLzEyNy4wLjAuMTo2MzY5Ni9zdHMv"}` - - validBearerTokenWithRoleParsed = BearerToken{ - getCallerIdentityMethod: "POST", - getCallerIdentityURL: "http://127.0.0.1:63696/sts/", - getCallerIdentityHeader: http.Header{ - "Authorization": []string{"AWS4-HMAC-SHA256 Credential=fake-key-id/20220322/fake-region/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-consul-iam-getentity-body;x-consul-iam-getentity-headers;x-consul-iam-getentity-method;x-consul-iam-getentity-url, Signature=561ac31b5ad01c124c54c168f76ea6bfd645b8ec5345f8357729c9aa0b471337"}, - "Content-Length": []string{"43"}, - "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, - "User-Agent": []string{"aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"}, - "X-Amz-Date": []string{"20220322T225742Z"}, - "X-Consul-Iam-Getentity-Body": []string{"Action=GetRole&RoleName=my-role&Version=2010-05-08"}, - "X-Consul-Iam-Getentity-Headers": []string{`{"Authorization":["AWS4-HMAC-SHA256 Credential=fake-key-id/20220322/us-east-1/iam/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aa2a19d0a305d74bbd009776bca880e53cfa991e483e403947181a41cc482940"],"Content-Length":["50"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"],"X-Amz-Date":["20220322T225742Z"]}`}, - "X-Consul-Iam-Getentity-Method": []string{"POST"}, - "X-Consul-Iam-Getentity-Url": []string{"https://iam.amazonaws.com/"}, - }, - getCallerIdentityBody: "Action=GetCallerIdentity&Version=2011-06-15", - - // Fields parsed from headers above - getIAMEntityMethod: "POST", - getIAMEntityURL: "https://iam.amazonaws.com/", - getIAMEntityHeader: http.Header{ - "Authorization": 
[]string{"AWS4-HMAC-SHA256 Credential=fake-key-id/20220322/us-east-1/iam/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aa2a19d0a305d74bbd009776bca880e53cfa991e483e403947181a41cc482940"}, - "Content-Length": []string{"50"}, - "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, - "User-Agent": []string{"aws-sdk-go/1.42.34 (go1.17.5; darwin; amd64)"}, - "X-Amz-Date": []string{"20220322T225742Z"}, - }, - getIAMEntityBody: "Action=GetRole&RoleName=my-role&Version=2010-05-08", - entityRequestType: "GetRole", - - parsedCallerIdentityURL: &url.URL{ - Scheme: "http", - Host: "127.0.0.1:63696", - Path: "/sts/", - }, - parsedIAMEntityURL: &url.URL{ - Scheme: "https", - Host: "iam.amazonaws.com", - Path: "/", - }, - } - - tokenJsonMissingMethodField = `{ - "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", - "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==", - "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" -}` - - tokenJsonMissingBodyField = `{ - "iam_http_request_method":"POST", - "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==", - "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" -}` - - tokenJsonMissingHeadersField = `{ - "iam_http_request_method":"POST", - "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", - "iam_request_url":"aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=" -}` - - tokenJsonMissingUrlField = `{ - "iam_http_request_method":"POST", - "iam_request_body":"QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==", - "iam_request_headers":"eyJBdXRob3JpemF0aW9uIjpbIkFXUzQtSE1BQy1TSEEyNTYgQ3JlZGVudGlhbD1mYWtlLzIwMjIwMzIyL3VzLWVhc3QtMS9zdHMvYXdzNF9yZXF1ZXN0LCBTaWduZWRIZWFkZXJzPWNvbnRlbnQtbGVuZ3RoO2NvbnRlbnQtdHlwZTtob3N0O3gtYW16LWRhdGU7eC1hbXotc2VjdXJpdHktdG9rZW4sIFNpZ25hdHVyZT1lZmMzMjBiOTcyZDA3YjM4YjY1ZWIyNDI1NjgwNWUwMzE0OWRhNTg2ZDgwNGY4YzYzNjRjZTk4ZGViZTA4MGIxIl0sIkNvbnRlbnQtTGVuZ3RoIjpbIjQzIl0sIkNvbnRlbnQtVHlwZSI6WyJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9dXRmLTgiXSwiVXNlci1BZ2VudCI6WyJhd3Mtc2RrLWdvLzEuNDIuMzQgKGdvMS4xNy41OyBkYXJ3aW47IGFtZDY0KSJdLCJYLUFtei1EYXRlIjpbIjIwMjIwMzIyVDIxMTEwM1oiXSwiWC1BbXotU2VjdXJpdHktVG9rZW4iOlsiZmFrZSJdfQ==" -}` -) diff --git a/internal/iamauth/util.go b/internal/iamauth/util.go deleted file mode 100644 index b92270cfd..000000000 
--- a/internal/iamauth/util.go +++ /dev/null @@ -1,143 +0,0 @@ -package iamauth - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/hashicorp/consul/internal/iamauth/responses" - "github.com/hashicorp/go-hclog" -) - -type LoginInput struct { - Creds *credentials.Credentials - IncludeIAMEntity bool - STSEndpoint string - STSRegion string - - Logger hclog.Logger - - ServerIDHeaderValue string - // Customizable header names - ServerIDHeaderName string - GetEntityMethodHeader string - GetEntityURLHeader string - GetEntityHeadersHeader string - GetEntityBodyHeader string -} - -// GenerateLoginData populates the necessary data to send for the bearer token. -// https://github.com/hashicorp/go-secure-stdlib/blob/main/awsutil/generate_credentials.go#L232-L301 -func GenerateLoginData(in *LoginInput) (map[string]interface{}, error) { - cfg := aws.Config{ - Credentials: in.Creds, - // These are empty strings by default (i.e. not enabled) - Region: aws.String(in.STSRegion), - Endpoint: aws.String(in.STSEndpoint), - STSRegionalEndpoint: endpoints.RegionalSTSEndpoint, - } - - stsSession, err := session.NewSessionWithOptions(session.Options{Config: cfg}) - if err != nil { - return nil, err - } - - svc := sts.New(stsSession) - stsRequest, _ := svc.GetCallerIdentityRequest(nil) - - // Include the iam:GetRole or iam:GetUser request in headers. - if in.IncludeIAMEntity { - entityRequest, err := formatSignedEntityRequest(svc, in) - if err != nil { - return nil, err - } - - headersJson, err := json.Marshal(entityRequest.HTTPRequest.Header) - if err != nil { - return nil, err - } - requestBody, err := ioutil.ReadAll(entityRequest.HTTPRequest.Body) - if err != nil { - return nil, err - } - - stsRequest.HTTPRequest.Header.Add(in.GetEntityMethodHeader, entityRequest.HTTPRequest.Method) - stsRequest.HTTPRequest.Header.Add(in.GetEntityURLHeader, entityRequest.HTTPRequest.URL.String()) - stsRequest.HTTPRequest.Header.Add(in.GetEntityHeadersHeader, string(headersJson)) - stsRequest.HTTPRequest.Header.Add(in.GetEntityBodyHeader, string(requestBody)) - } - - // Inject the required auth header value, if supplied, and then sign the request including that header - if in.ServerIDHeaderValue != "" { - stsRequest.HTTPRequest.Header.Add(in.ServerIDHeaderName, in.ServerIDHeaderValue) - } - - stsRequest.Sign() - - // Now extract out the relevant parts of the request - headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header) - if err != nil { - return nil, err - } - requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body) - if err != nil { - return nil, err - } - - return map[string]interface{}{ - "iam_http_request_method": stsRequest.HTTPRequest.Method, - "iam_request_url": base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String())), - "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson), - "iam_request_body": base64.StdEncoding.EncodeToString(requestBody), - }, nil -} - -func formatSignedEntityRequest(svc *sts.STS, in *LoginInput) (*request.Request, error) { - // We need to retrieve the IAM user or role for the iam:GetRole or iam:GetUser request. - // GetCallerIdentity returns this and requires no permissions. 
- resp, err := svc.GetCallerIdentity(nil) - if err != nil { - return nil, err - } - - arn, err := responses.ParseArn(*resp.Arn) - if err != nil { - return nil, err - } - - iamSession, err := session.NewSessionWithOptions(session.Options{ - Config: aws.Config{ - Credentials: svc.Config.Credentials, - }, - }) - if err != nil { - return nil, err - } - iamSvc := iam.New(iamSession) - - var req *request.Request - switch arn.Type { - case "role", "assumed-role": - req, _ = iamSvc.GetRoleRequest(&iam.GetRoleInput{RoleName: &arn.FriendlyName}) - case "user": - req, _ = iamSvc.GetUserRequest(&iam.GetUserInput{UserName: &arn.FriendlyName}) - default: - return nil, fmt.Errorf("entity %s is not an IAM role or IAM user", arn.Type) - } - - // Inject the required auth header value, if supplied, and then sign the request including that header - if in.ServerIDHeaderValue != "" { - req.HTTPRequest.Header.Add(in.ServerIDHeaderName, in.ServerIDHeaderValue) - } - - req.Sign() - return req, nil -} diff --git a/lib/glob.go b/lib/glob.go deleted file mode 100644 index 969e3ab25..000000000 --- a/lib/glob.go +++ /dev/null @@ -1,24 +0,0 @@ -package lib - -import "strings" - -// GlobbedStringsMatch compares item to val with support for a leading and/or -// trailing wildcard '*' in item. -func GlobbedStringsMatch(item, val string) bool { - if len(item) < 2 { - return val == item - } - - hasPrefix := strings.HasPrefix(item, "*") - hasSuffix := strings.HasSuffix(item, "*") - - if hasPrefix && hasSuffix { - return strings.Contains(val, item[1:len(item)-1]) - } else if hasPrefix { - return strings.HasSuffix(val, item[1:]) - } else if hasSuffix { - return strings.HasPrefix(val, item[:len(item)-1]) - } - - return val == item -} diff --git a/lib/glob_test.go b/lib/glob_test.go deleted file mode 100644 index 6c29f5ef1..000000000 --- a/lib/glob_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package lib - -import "testing" - -func TestGlobbedStringsMatch(t *testing.T) { - tests := []struct { - item string - val string - expect bool - }{ - {"", "", true}, - {"*", "*", true}, - {"**", "**", true}, - {"*t", "t", true}, - {"*t", "test", true}, - {"t*", "test", true}, - {"*test", "test", true}, - {"*test", "a test", true}, - {"test", "a test", false}, - {"*test", "tests", false}, - {"test*", "test", true}, - {"test*", "testsss", true}, - {"test**", "testsss", false}, - {"test**", "test*", true}, - {"**test", "*test", true}, - {"TEST", "test", false}, - {"test", "test", true}, - } - - for _, tt := range tests { - actual := GlobbedStringsMatch(tt.item, tt.val) - - if actual != tt.expect { - t.Fatalf("Bad testcase %#v, expected %t, got %t", tt, tt.expect, actual) - } - } -} From 285b4cef2b66a9962b66942323992b40ac972638 Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Tue, 19 Jul 2022 14:51:04 -0700 Subject: [PATCH 041/107] connect: Add support for Envoy 1.23, remove 1.19 (#13807) --- .changelog/13807.txt | 6 ++++++ .circleci/config.yml | 7 ++++--- agent/xds/envoy_versioning.go | 2 +- agent/xds/envoy_versioning_test.go | 7 ++++--- agent/xds/proxysupport/proxysupport.go | 6 +++--- test/integration/connect/envoy/run-tests.sh | 2 +- 6 files changed, 19 insertions(+), 11 deletions(-) create mode 100644 .changelog/13807.txt diff --git a/.changelog/13807.txt b/.changelog/13807.txt new file mode 100644 index 000000000..d1cec75f7 --- /dev/null +++ b/.changelog/13807.txt @@ -0,0 +1,6 @@ +```release-note: improvement +connect: Add Envoy 1.23.0 to support matrix +``` +```release-note: breaking-change +connect: Removes support for Envoy 1.19 +``` diff 
--git a/.circleci/config.yml b/.circleci/config.yml index a9c434b46..de9620486 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -24,9 +24,10 @@ references: VAULT_BINARY_VERSION: 1.9.4 GO_VERSION: 1.18.1 envoy-versions: &supported_envoy_versions - - &default_envoy_version "1.19.5" - - "1.20.4" - - "1.21.3" + - &default_envoy_version "1.20.6" + - "1.21.4" + - "1.22.2" + - "1.23.0" images: # When updating the Go version, remember to also update the versions in the # workflows section for go-test-lib jobs. diff --git a/agent/xds/envoy_versioning.go b/agent/xds/envoy_versioning.go index e0face0bd..31955e28a 100644 --- a/agent/xds/envoy_versioning.go +++ b/agent/xds/envoy_versioning.go @@ -11,7 +11,7 @@ import ( var ( // minSupportedVersion is the oldest mainline version we support. This should always be // the zero'th point release of the last element of proxysupport.EnvoyVersions. - minSupportedVersion = version.Must(version.NewVersion("1.19.0")) + minSupportedVersion = version.Must(version.NewVersion("1.20.0")) specificUnsupportedVersions = []unsupportedVersion{} ) diff --git a/agent/xds/envoy_versioning_test.go b/agent/xds/envoy_versioning_test.go index 36c3831b1..4e446de1e 100644 --- a/agent/xds/envoy_versioning_test.go +++ b/agent/xds/envoy_versioning_test.go @@ -120,6 +120,7 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) { "1.16.6": {expectErr: "Envoy 1.16.6 " + errTooOld}, "1.17.4": {expectErr: "Envoy 1.17.4 " + errTooOld}, "1.18.6": {expectErr: "Envoy 1.18.6 " + errTooOld}, + "1.19.5": {expectErr: "Envoy 1.19.5 " + errTooOld}, } // Insert a bunch of valid versions. @@ -134,10 +135,10 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) { } */ for _, v := range []string{ - "1.19.0", "1.19.1", "1.19.2", "1.19.3", "1.19.4", "1.19.5", - "1.20.0", "1.20.1", "1.20.2", "1.20.3", "1.20.4", - "1.21.0", "1.21.1", "1.21.2", "1.21.3", + "1.20.0", "1.20.1", "1.20.2", "1.20.3", "1.20.4", "1.20.5", "1.20.6", + "1.21.0", "1.21.1", "1.21.2", "1.21.3", "1.21.4", "1.22.0", "1.22.1", "1.22.2", + "1.23.0", } { cases[v] = testcase{expect: supportedProxyFeatures{}} } diff --git a/agent/xds/proxysupport/proxysupport.go b/agent/xds/proxysupport/proxysupport.go index 22ddd15d8..bdb7cc864 100644 --- a/agent/xds/proxysupport/proxysupport.go +++ b/agent/xds/proxysupport/proxysupport.go @@ -7,8 +7,8 @@ package proxysupport // // see: https://www.consul.io/docs/connect/proxies/envoy#supported-versions var EnvoyVersions = []string{ + "1.23.0", "1.22.2", - "1.21.3", - "1.20.4", - "1.19.5", + "1.21.4", + "1.20.6", } diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index 21705a3ae..fc885f9a1 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -12,7 +12,7 @@ DEBUG=${DEBUG:-} XDS_TARGET=${XDS_TARGET:-server} # ENVOY_VERSION to run each test against -ENVOY_VERSION=${ENVOY_VERSION:-"1.22.2"} +ENVOY_VERSION=${ENVOY_VERSION:-"1.23.0"} export ENVOY_VERSION export DOCKER_BUILDKIT=1 From a9d3de8b3e2f3bf1a4d7406eade181d004432633 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jul 2022 12:38:39 +0100 Subject: [PATCH 042/107] ui: Peer token use form (#13792) --- .../consul/peer/form/generate/index.hbs | 13 ++++ .../components/consul/peer/form/index.scss | 62 +++++++++++++++++++ .../consul/peer/form/token/actions/index.hbs | 18 ++++++ .../peer/form/token/fieldsets/index.hbs | 37 +++++++++++ .../app/styles/base/icons/icons/index.scss | 2 +- 
.../consul-ui/app/styles/components.scss | 1 + 6 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/index.scss create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/token/actions/index.hbs create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/token/fieldsets/index.hbs diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs index 204906324..2d0667a65 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/generate/index.hbs @@ -47,6 +47,19 @@ + {{yield (hash + Fieldsets=(component "consul/peer/form/token/fieldsets" + item=@item + token=fsm.state.context.PeeringToken + regenerate=@regenerate + onclick=(queue (set @item 'Name' '')) + ) + Actions=(component "consul/peer/form/token/actions" + token=fsm.state.context.PeeringToken + item=@item + id=id + ) + )}} diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss b/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss new file mode 100644 index 000000000..c23773be1 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss @@ -0,0 +1,62 @@ + +.consul-peer-form-token-actions { + button:first-of-type { + @extend %primary-button; + } + button:last-of-type { + @extend %secondary-button; + } +} + +.consul-peer-form-generate { + & { + width: 416px; + min-height: 200px; + } + ol { + list-style-position: outside; + list-style-type: none; + counter-reset: hexagonal-counter; + position: relative; + } + ol::before { + content: ''; + border-left: var(--decor-border-100); + border-color: rgb(var(--tone-gray-300)); + height: 100%; + position: absolute; + left: 2rem; + } + li { + counter-increment: hexagonal-counter; + position: relative; + margin-left: 60px; + margin-bottom: 1rem; + } + li .copyable-code { + margin-top: 1rem; + } + li::before { + --icon-name: icon-hexagon; + --icon-size: icon-600; + content: ''; + position: absolute; + z-index: 2; + } + li::after { + content: counter(hexagonal-counter); + position: absolute; + top: 0px; + font-size: 14px; + font-weight: var(--typo-weight-bold); + background-color: rgb(var(--tone-gray-000)); + z-index: 1; + text-align: center; + } + li::before, + li::after { + left: -2.4rem; + width: 20px; + height: 20px; + } +} diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/token/actions/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/token/actions/index.hbs new file mode 100644 index 000000000..70cdb6d1d --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/token/actions/index.hbs @@ -0,0 +1,18 @@ +
      + + Copy token + + + Close + +
      diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/token/fieldsets/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/token/fieldsets/index.hbs new file mode 100644 index 000000000..b2a53bdd0 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/token/fieldsets/index.hbs @@ -0,0 +1,37 @@ +{{#if @regenerate}} +

      + Token regenerated! Here’s what’s next: +

      +{{else}} +

      + Token generated! Here’s what’s next: +

      +{{/if}} +
        +
      1. + Copy the token
        + This token cannot be viewed again after creation. +
        + +
      2. +
      3. + Switch to the peer
        + Someone on your team should log into the Datacenter (OSS) or Admin Partition (Enterprise) that you want this one to connect with. +
      4. +
      5. + Initiate the peering
        + From there, initiate a new peering, name it, and paste this token in. +
      6. +
      +{{#if (not @regenerate)}} + + Generate another token + +{{/if}} + diff --git a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss index 74559e892..9d1a5efe3 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss @@ -382,7 +382,7 @@ // @import './heart-fill/index.scss'; // @import './heart-off/index.scss'; // @import './help/index.scss'; -// @import './hexagon/index.scss'; +@import './hexagon/index.scss'; // @import './hexagon-fill/index.scss'; @import './history/index.scss'; // @import './home/index.scss'; diff --git a/ui/packages/consul-ui/app/styles/components.scss b/ui/packages/consul-ui/app/styles/components.scss index 234b6793a..e6dca0229 100644 --- a/ui/packages/consul-ui/app/styles/components.scss +++ b/ui/packages/consul-ui/app/styles/components.scss @@ -108,3 +108,4 @@ @import 'consul-ui/components/peerings/badge'; @import 'consul-ui/components/consul/node/peer-info'; @import 'consul-ui/components/consul/service/peer-info'; +@import 'consul-ui/components/consul/peer/form'; From 9176d3ed338e10c80b0b6b8dfb02195773ca23f0 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jul 2022 12:58:47 +0100 Subject: [PATCH 043/107] ui: Add Peer Form (#13794) --- .../components/consul/peer/form/README.mdx | 21 +++++++ .../consul/peer/form/chart.xstate.js | 20 ++++++ .../app/components/consul/peer/form/index.hbs | 63 +++++++++++++++++++ .../components/consul/peer/form/index.scss | 8 +++ .../app/components/consul/peer/index.scss | 1 + 5 files changed, 113 insertions(+) create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/README.mdx create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/chart.xstate.js create mode 100644 ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/README.mdx b/ui/packages/consul-peerings/app/components/consul/peer/form/README.mdx new file mode 100644 index 000000000..edd8ed012 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/README.mdx @@ -0,0 +1,21 @@ +# Consul::Peer::Form + +```hbs preview-template + + + + + + +``` diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/chart.xstate.js b/ui/packages/consul-peerings/app/components/consul/peer/form/chart.xstate.js new file mode 100644 index 000000000..7f611fd32 --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/chart.xstate.js @@ -0,0 +1,20 @@ +export default { + id: 'consul-peer-form', + initial: 'generate', + on: { + INITIATE: [ + { + target: 'initiate', + }, + ], + GENERATE: [ + { + target: 'generate', + }, + ], + }, + states: { + initiate: {}, + generate: {}, + }, +}; diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs new file mode 100644 index 000000000..374405b4a --- /dev/null +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs @@ -0,0 +1,63 @@ +
      + + + + + + + + {{yield (hash + Form=(component 'consul/peer/form/generate' + item=source.data + ) + ) + }} + + + + + + + + {{yield (hash + Form=(component 'consul/peer/form/initiate' + item=source.data + ) + ) + }} + + + + + + +
      diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss b/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss index c23773be1..e4b96b461 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/index.scss @@ -1,3 +1,11 @@ +.consul-peer-form { + & { + width: 416px; + } + nav { + margin-bottom: 20px; + } +} .consul-peer-form-token-actions { button:first-of-type { diff --git a/ui/packages/consul-peerings/app/components/consul/peer/index.scss b/ui/packages/consul-peerings/app/components/consul/peer/index.scss index 9f82e6a21..8b1be7ce8 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/index.scss +++ b/ui/packages/consul-peerings/app/components/consul/peer/index.scss @@ -2,4 +2,5 @@ @import './list'; @import './search-bar'; +@import './form'; From 59b044f96d416b77b90790ae21f5fa234dae2781 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Wed, 20 Jul 2022 17:07:52 +0200 Subject: [PATCH 044/107] ui: no partition and peer in bucket-list at the same time (#13812) * don't show partition / peer at the same time in bucket-list * use bucket-list in intentions table * add bucket-list tests * Simplify bucket list - match old behavior Refactor the bucket-list component to be easier to grok and match how the old template based approach worked. I.e. do not surface partition or namespace when it matches the passed nspace or partition property. * Update docs for bucket-list * fix linting --- .../components/consul/bucket/list/README.mdx | 5 +- .../components/consul/bucket/list/index.js | 131 +++++---- .../consul/intention/list/table/index.hbs | 43 +-- .../components/consul/bucket/list-test.js | 255 ++++++++++++++++++ 4 files changed, 344 insertions(+), 90 deletions(-) create mode 100644 ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx b/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx index d6bdc01c6..39d1703b2 100644 --- a/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx @@ -11,6 +11,9 @@ the namespace will be displayed, whereas if the partition is different it will show both the partition and namespace (as a namespace called 'team-1' in `partition-1` is different to a namespace called 'team-1' in `partition-2`) +When the passed item contains a `PeerName`, this will be displayed in place of +the `Partition`. + Showing the service name is a tiny bit awkward (different boolean type, doesn't care about difference) and could be improved but we only use it for the read only view of intentions. @@ -89,7 +92,7 @@ At the time of writing, this is not currently used across the entire UI | Argument/Attribute | Type | Default | Description | | --- | --- | --- | --- | -| `item` | `array` | | A Consul object that could have both a `Partition` and a `Namespace` property | +| `item` | `object` | | A Consul object that could have a `Partition`, a `Namespace`, a `PeerName` and a `Service` property | | `nspace` | `string` | | The name of the current namespace | | `partition` | `string` | | The name of the current partition | | `service` | `boolean` | | Whether to show the service name on the end or not. Please note you must also pass a item.Service for it to show. 
We flag this incase an API request has a Service property but you don't want to show it | diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/index.js b/ui/packages/consul-ui/app/components/consul/bucket/list/index.js index 9c48f1bd9..c874ac91b 100644 --- a/ui/packages/consul-ui/app/components/consul/bucket/list/index.js +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/index.js @@ -5,84 +5,105 @@ export default class ConsulBucketList extends Component { @service abilities; get itemsToDisplay() { - const { item, partition, nspace } = this.args; - const { abilities } = this; + const { peerOrPartitionPart, namespacePart, servicePart } = this; - let items = []; + return [...peerOrPartitionPart, ...namespacePart, ...servicePart]; + } + + get peerOrPartitionPart() { + const { peerPart, partitionPart } = this; + + if (peerPart.length) { + return peerPart; + } else { + return partitionPart; + } + } + + get partitionPart() { + const { item, partition } = this.args; + + const { abilities } = this; if (partition && abilities.can('use partitions')) { if (item.Partition !== partition) { - this._addPeer(items); - this._addPartition(items); - this._addNamespace(items); - this._addService(items); - } else { - this._addPeerInfo(items); + return [ + { + type: 'partition', + label: 'Admin Partition', + item: item.Partition, + }, + ]; } - } else if (nspace && abilities.can('use nspace')) { - if (item.Namespace !== nspace) { - this._addPeerInfo(items); - this._addService(items); - } else { - this._addPeerInfo(items); - } - } else { - this._addPeerInfo(items); } - return items; + return []; } - _addPeerInfo(items) { + get peerPart() { const { item } = this.args; if (item.PeerName) { - this._addPeer(items); - this._addNamespace(items); + return [ + { + type: 'peer', + label: 'Peer', + item: item.PeerName, + }, + ]; } + + return []; } - _addPartition(items) { - const { item } = this.args; + get namespacePart() { + const { item, nspace } = this.args; + const { abilities, partitionPart } = this; - items.push({ - type: 'partition', - label: 'Admin Partition', - item: item.Partition, - }); - } - - _addNamespace(items) { - const { item } = this.args; - - items.push({ + const nspaceItem = { type: 'nspace', label: 'Namespace', item: item.Namespace, - }); + }; + + // when we surface a partition - show a namespace with it + if (partitionPart.length) { + return [nspaceItem]; + } + + if (nspace && abilities.can('use nspaces')) { + if (item.Namespace !== nspace) { + return [ + { + type: 'nspace', + label: 'Namespace', + item: item.Namespace, + }, + ]; + } + } + + return []; } - _addService(items) { - const { service, item } = this.args; + get servicePart() { + const { item, service } = this.args; - if (service && item.Service) { - items.push({ - type: 'service', - label: 'Service', - item: item.Service, - }); + const { partitionPart, namespacePart } = this; + + // when we show partitionPart or namespacePart -> consider service part + if (partitionPart.length || namespacePart.length) { + if (item.Service && service) { + return [ + { + type: 'service', + label: 'Service', + item: item.Service, + }, + ]; + } } - } - _addPeer(items) { - const { item } = this.args; - - if (item?.PeerName) { - items.push({ - type: 'peer', - label: 'Peer', - item: item.PeerName, - }); - } + return []; } } diff --git a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs index 4bb110a89..03e47c17e 100644 --- 
a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs @@ -24,43 +24,18 @@ as |item index|> {{else}} {{item.SourceName}} {{/if}} - {{#if (or (can 'use nspaces') (can 'use partitions'))}} {{! TODO: slugify }} - {{#if item.SourcePeer}} - - - - - - - - - {{item.SourcePeer}} - - {{else}} - - {{or item.SourcePartition 'default'}} - - {{/if}} - / - {{or item.SourceNS 'default'}} + - {{/if}} diff --git a/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js b/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js new file mode 100644 index 000000000..15d6b81ce --- /dev/null +++ b/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js @@ -0,0 +1,255 @@ +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; +import { render } from '@ember/test-helpers'; +import Service from '@ember/service'; + +module('Integration | Component | consul bucket list', function(hooks) { + setupRenderingTest(hooks); + + module('without nspace or partition feature on', function(hooks) { + hooks.beforeEach(function() { + this.owner.register( + 'service:abilities', + class Stub extends Service { + can(permission) { + if (permission === 'use partitions') { + return false; + } + if (permission === 'use nspaces') { + return false; + } + + return false; + } + } + ); + }); + + test('it displays a peer when the item passed has a peer name', async function(assert) { + const PEER_NAME = 'Tomster'; + + this.set('peerName', PEER_NAME); + + await render(hbs` + + `); + + assert.dom('[data-test-bucket-item="peer"]').hasText(PEER_NAME, 'Peer name is displayed'); + assert.dom('[data-test-bucket-item="nspace"]').doesNotExist('namespace is not shown'); + assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not shown'); + }); + + test('it does not display a bucket list when item has no peer name', async function(assert) { + await render(hbs` + + `); + + assert.dom('[data-test-bucket-list]').doesNotExist('no bucket list displayed'); + }); + }); + + module('with partition feature on', function(hooks) { + hooks.beforeEach(function() { + this.owner.register( + 'service:abilities', + class Stub extends Service { + can(permission) { + if (permission === 'use partitions') { + return true; + } + if (permission === 'use nspaces') { + return true; + } + + return false; + } + } + ); + }); + + test("it displays a peer and nspace and service and no partition when item.Partition and partition don't match", async function(assert) { + const PEER_NAME = 'Tomster'; + const NAMESPACE_NAME = 'Mascot'; + const SERVICE_NAME = 'Ember.js'; + + this.set('peerName', PEER_NAME); + this.set('namespace', NAMESPACE_NAME); + this.set('service', SERVICE_NAME); + + await render(hbs` + + `); + + assert.dom('[data-test-bucket-item="peer"]').hasText(PEER_NAME, 'Peer is displayed'); + assert + .dom('[data-test-bucket-item="nspace"]') + .hasText(NAMESPACE_NAME, 'namespace is displayed'); + assert.dom('[data-test-bucket-item="service"]').hasText(SERVICE_NAME, 'service is displayed'); + assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not displayed'); + }); + + test("it displays partition and nspace and service when item.Partition and partition don't match and peer is not set", async function(assert) { + const PARTITION_NAME = 'Ember.js'; + const NAMESPACE_NAME = 'Mascot'; + 
const SERVICE_NAME = 'Consul'; + + this.set('partition', PARTITION_NAME); + this.set('namespace', NAMESPACE_NAME); + this.set('service', SERVICE_NAME); + + await render(hbs` + + `); + + assert.dom('[data-test-bucket-item="peer"]').doesNotExist('peer is not displayed'); + assert + .dom('[data-test-bucket-item="nspace"]') + .hasText(NAMESPACE_NAME, 'namespace is displayed'); + assert.dom('[data-test-bucket-item="service"]').hasText(SERVICE_NAME, 'service is displayed'); + assert + .dom('[data-test-bucket-item="partition"]') + .hasText(PARTITION_NAME, 'partition is displayed'); + }); + + test('it displays nspace and peer and service when item.Partition and partition match and peer is set', async function(assert) { + const PEER_NAME = 'Tomster'; + const PARTITION_NAME = 'Ember.js'; + const NAMESPACE_NAME = 'Mascot'; + const SERVICE_NAME = 'Ember.js'; + + this.set('peerName', PEER_NAME); + this.set('partition', PARTITION_NAME); + this.set('namespace', NAMESPACE_NAME); + this.set('service', SERVICE_NAME); + + await render(hbs` + + `); + + assert.dom('[data-test-bucket-item="peer"]').hasText(PEER_NAME, 'peer is displayed'); + assert + .dom('[data-test-bucket-item="nspace"]') + .hasText(NAMESPACE_NAME, 'namespace is displayed'); + assert.dom('[data-test-bucket-item="service"]').hasText(SERVICE_NAME, 'service is displayed'); + assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not displayed'); + }); + }); + + module('with nspace on but partition feature off', function(hooks) { + hooks.beforeEach(function() { + this.owner.register( + 'service:abilities', + class Stub extends Service { + can(permission) { + if (permission === 'use partitions') { + return false; + } + if (permission === 'use nspaces') { + return true; + } + + return false; + } + } + ); + }); + + test("it displays a peer and nspace and service when item.namespace and nspace don't match", async function(assert) { + const PEER_NAME = 'Tomster'; + const NAMESPACE_NAME = 'Mascot'; + const SERVICE_NAME = 'Ember.js'; + + this.set('peerName', PEER_NAME); + this.set('namespace', NAMESPACE_NAME); + this.set('service', SERVICE_NAME); + + await render(hbs` + + `); + + assert.dom('[data-test-bucket-item="peer"]').hasText(PEER_NAME, 'Peer is displayed'); + assert + .dom('[data-test-bucket-item="nspace"]') + .hasText(NAMESPACE_NAME, 'namespace is displayed'); + assert.dom('[data-test-bucket-item="service"]').hasText(SERVICE_NAME, 'service is displayed'); + assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not displayed'); + }); + + test('it displays a peer and no nspace and no service when item.namespace and nspace match', async function(assert) { + const PEER_NAME = 'Tomster'; + const NAMESPACE_NAME = 'Mascot'; + const SERVICE_NAME = 'Ember.js'; + + this.set('peerName', PEER_NAME); + this.set('namespace', NAMESPACE_NAME); + this.set('service', SERVICE_NAME); + + await render(hbs` + + `); + + assert.dom('[data-test-bucket-item="peer"]').hasText(PEER_NAME, 'Peer is displayed'); + assert.dom('[data-test-bucket-item="nspace"]').doesNotExist('namespace is not displayed'); + assert.dom('[data-test-bucket-item="service"]').doesNotExist('service is not displayed'); + assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not displayed'); + }); + }); +}); From 28dd74a9d1f7583fedeeb94da2b8f42804369603 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Wed, 20 Jul 2022 11:31:02 -0700 Subject: [PATCH 045/107] Add heartbeat proto to peer stream 
(#13804) --- proto/pbpeerstream/peerstream.pb.binary.go | 10 + proto/pbpeerstream/peerstream.pb.go | 256 +++++++++++++-------- proto/pbpeerstream/peerstream.proto | 4 + 3 files changed, 180 insertions(+), 90 deletions(-) diff --git a/proto/pbpeerstream/peerstream.pb.binary.go b/proto/pbpeerstream/peerstream.pb.binary.go index c5d928949..926b40154 100644 --- a/proto/pbpeerstream/peerstream.pb.binary.go +++ b/proto/pbpeerstream/peerstream.pb.binary.go @@ -47,6 +47,16 @@ func (msg *ReplicationMessage_Terminated) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Heartbeat) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Heartbeat) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + // MarshalBinary implements encoding.BinaryMarshaler func (msg *LeaderAddress) MarshalBinary() ([]byte, error) { return proto.Marshal(msg) diff --git a/proto/pbpeerstream/peerstream.pb.go b/proto/pbpeerstream/peerstream.pb.go index 8b71b4e8c..c90be922d 100644 --- a/proto/pbpeerstream/peerstream.pb.go +++ b/proto/pbpeerstream/peerstream.pb.go @@ -86,6 +86,7 @@ type ReplicationMessage struct { // *ReplicationMessage_Request_ // *ReplicationMessage_Response_ // *ReplicationMessage_Terminated_ + // *ReplicationMessage_Heartbeat_ Payload isReplicationMessage_Payload `protobuf_oneof:"Payload"` } @@ -149,6 +150,13 @@ func (x *ReplicationMessage) GetTerminated() *ReplicationMessage_Terminated { return nil } +func (x *ReplicationMessage) GetHeartbeat() *ReplicationMessage_Heartbeat { + if x, ok := x.GetPayload().(*ReplicationMessage_Heartbeat_); ok { + return x.Heartbeat + } + return nil +} + type isReplicationMessage_Payload interface { isReplicationMessage_Payload() } @@ -165,12 +173,18 @@ type ReplicationMessage_Terminated_ struct { Terminated *ReplicationMessage_Terminated `protobuf:"bytes,3,opt,name=terminated,proto3,oneof"` } +type ReplicationMessage_Heartbeat_ struct { + Heartbeat *ReplicationMessage_Heartbeat `protobuf:"bytes,4,opt,name=heartbeat,proto3,oneof"` +} + func (*ReplicationMessage_Request_) isReplicationMessage_Payload() {} func (*ReplicationMessage_Response_) isReplicationMessage_Payload() {} func (*ReplicationMessage_Terminated_) isReplicationMessage_Payload() {} +func (*ReplicationMessage_Heartbeat_) isReplicationMessage_Payload() {} + // LeaderAddress is sent when the peering service runs on a consul node // that is not a leader. The node either lost leadership, or never was a leader. type LeaderAddress struct { @@ -476,6 +490,45 @@ func (*ReplicationMessage_Terminated) Descriptor() ([]byte, []int) { return file_proto_pbpeerstream_peerstream_proto_rawDescGZIP(), []int{0, 2} } +// Heartbeat is sent to verify that the connection is still active. 
+type ReplicationMessage_Heartbeat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReplicationMessage_Heartbeat) Reset() { + *x = ReplicationMessage_Heartbeat{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage_Heartbeat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage_Heartbeat) ProtoMessage() {} + +func (x *ReplicationMessage_Heartbeat) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeerstream_peerstream_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage_Heartbeat.ProtoReflect.Descriptor instead. +func (*ReplicationMessage_Heartbeat) Descriptor() ([]byte, []int) { + return file_proto_pbpeerstream_peerstream_proto_rawDescGZIP(), []int{0, 3} +} + var File_proto_pbpeerstream_peerstream_proto protoreflect.FileDescriptor var file_proto_pbpeerstream_peerstream_proto_rawDesc = []byte{ @@ -489,7 +542,7 @@ var file_proto_pbpeerstream_peerstream_proto_rawDesc = []byte{ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xe5, 0x05, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0xd6, 0x06, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, @@ -508,76 +561,84 @@ var file_proto_pbpeerstream_peerstream_proto_rawDesc = []byte{ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, - 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x01, 0x0a, - 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, - 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, - 0x12, 0x24, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x3e, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x73, 0x74, 
0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xe3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x1e, 0x0a, - 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x4d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x0c, - 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, 0x07, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x4c, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x22, 0x5c, 0x0a, 0x0f, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, - 0x2a, 0x52, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, - 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, - 0x54, 0x45, 0x10, 0x02, 0x32, 0x9f, 0x01, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x89, 0x01, 0x0a, 0x0f, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x9f, 0x02, 0x0a, 0x28, 0x63, 0x6f, 0x6d, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x42, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, 0x24, - 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0xca, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xe2, 0x02, 0x30, 0x48, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x27, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x50, 0x65, - 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x09, + 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x48, 0x00, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x1a, 0xa9, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, + 0x65, 0x72, 0x49, 0x44, 0x12, 0x24, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, + 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x3e, 0x0a, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xe3, 0x01, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, + 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, + 0x44, 0x12, 0x30, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, + 0x1a, 0x0b, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x42, 0x09, 0x0a, + 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x4c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x22, 0x5c, 0x0a, 0x0f, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x2a, 0x52, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x4f, 0x50, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, + 0x14, 0x0a, 0x10, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x10, 0x02, 0x32, 0x9f, 0x01, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, + 0x72, 0x65, 
0x61, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x89, 0x01, 0x0a, 0x0f, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x9f, 0x02, 0x0a, 0x28, 0x63, 0x6f, 0x6d, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x42, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, + 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0xca, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xe2, 0x02, 0x30, 0x48, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x27, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x50, + 0x65, 0x65, 0x72, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -593,7 +654,7 @@ func file_proto_pbpeerstream_peerstream_proto_rawDescGZIP() []byte { } var file_proto_pbpeerstream_peerstream_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_proto_pbpeerstream_peerstream_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_proto_pbpeerstream_peerstream_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_proto_pbpeerstream_peerstream_proto_goTypes = []interface{}{ (Operation)(0), // 0: hashicorp.consul.internal.peerstream.Operation (*ReplicationMessage)(nil), // 1: hashicorp.consul.internal.peerstream.ReplicationMessage @@ -602,25 +663,27 @@ var file_proto_pbpeerstream_peerstream_proto_goTypes = []interface{}{ (*ReplicationMessage_Request)(nil), // 4: hashicorp.consul.internal.peerstream.ReplicationMessage.Request 
(*ReplicationMessage_Response)(nil), // 5: hashicorp.consul.internal.peerstream.ReplicationMessage.Response (*ReplicationMessage_Terminated)(nil), // 6: hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated - (*pbservice.CheckServiceNode)(nil), // 7: hashicorp.consul.internal.service.CheckServiceNode - (*pbstatus.Status)(nil), // 8: hashicorp.consul.internal.status.Status - (*anypb.Any)(nil), // 9: google.protobuf.Any + (*ReplicationMessage_Heartbeat)(nil), // 7: hashicorp.consul.internal.peerstream.ReplicationMessage.Heartbeat + (*pbservice.CheckServiceNode)(nil), // 8: hashicorp.consul.internal.service.CheckServiceNode + (*pbstatus.Status)(nil), // 9: hashicorp.consul.internal.status.Status + (*anypb.Any)(nil), // 10: google.protobuf.Any } var file_proto_pbpeerstream_peerstream_proto_depIdxs = []int32{ - 4, // 0: hashicorp.consul.internal.peerstream.ReplicationMessage.request:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Request - 5, // 1: hashicorp.consul.internal.peerstream.ReplicationMessage.response:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Response - 6, // 2: hashicorp.consul.internal.peerstream.ReplicationMessage.terminated:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated - 7, // 3: hashicorp.consul.internal.peerstream.ExportedService.Nodes:type_name -> hashicorp.consul.internal.service.CheckServiceNode - 8, // 4: hashicorp.consul.internal.peerstream.ReplicationMessage.Request.Error:type_name -> hashicorp.consul.internal.status.Status - 9, // 5: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any - 0, // 6: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.operation:type_name -> hashicorp.consul.internal.peerstream.Operation - 1, // 7: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:input_type -> hashicorp.consul.internal.peerstream.ReplicationMessage - 1, // 8: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:output_type -> hashicorp.consul.internal.peerstream.ReplicationMessage - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 4, // 0: hashicorp.consul.internal.peerstream.ReplicationMessage.request:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Request + 5, // 1: hashicorp.consul.internal.peerstream.ReplicationMessage.response:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Response + 6, // 2: hashicorp.consul.internal.peerstream.ReplicationMessage.terminated:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Terminated + 7, // 3: hashicorp.consul.internal.peerstream.ReplicationMessage.heartbeat:type_name -> hashicorp.consul.internal.peerstream.ReplicationMessage.Heartbeat + 8, // 4: hashicorp.consul.internal.peerstream.ExportedService.Nodes:type_name -> hashicorp.consul.internal.service.CheckServiceNode + 9, // 5: hashicorp.consul.internal.peerstream.ReplicationMessage.Request.Error:type_name -> hashicorp.consul.internal.status.Status + 10, // 6: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any + 0, // 7: hashicorp.consul.internal.peerstream.ReplicationMessage.Response.operation:type_name -> hashicorp.consul.internal.peerstream.Operation + 1, // 
8: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:input_type -> hashicorp.consul.internal.peerstream.ReplicationMessage + 1, // 9: hashicorp.consul.internal.peerstream.PeerStreamService.StreamResources:output_type -> hashicorp.consul.internal.peerstream.ReplicationMessage + 9, // [9:10] is the sub-list for method output_type + 8, // [8:9] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_proto_pbpeerstream_peerstream_proto_init() } @@ -701,11 +764,24 @@ func file_proto_pbpeerstream_peerstream_proto_init() { return nil } } + file_proto_pbpeerstream_peerstream_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Heartbeat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_proto_pbpeerstream_peerstream_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ReplicationMessage_Request_)(nil), (*ReplicationMessage_Response_)(nil), (*ReplicationMessage_Terminated_)(nil), + (*ReplicationMessage_Heartbeat_)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -713,7 +789,7 @@ func file_proto_pbpeerstream_peerstream_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_pbpeerstream_peerstream_proto_rawDesc, NumEnums: 1, - NumMessages: 6, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/pbpeerstream/peerstream.proto b/proto/pbpeerstream/peerstream.proto index 54be6e4b7..cbd9e590b 100644 --- a/proto/pbpeerstream/peerstream.proto +++ b/proto/pbpeerstream/peerstream.proto @@ -25,6 +25,7 @@ message ReplicationMessage { Request request = 1; Response response = 2; Terminated terminated = 3; + Heartbeat heartbeat = 4; } // A Request requests to subscribe to a resource of a given type. @@ -69,6 +70,9 @@ message ReplicationMessage { // Terminated is sent when a peering is deleted locally. // This message signals to the peer that they should clean up their local state about the peering. message Terminated {} + + // Heartbeat is sent to verify that the connection is still active. + message Heartbeat {} } // Operation enumerates supported operations for replicated resources. 
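The patch above only introduces the empty `ReplicationMessage_Heartbeat` payload and its generated oneof wrapper; it does not show a sender. The following is a rough sketch of how a peer-stream handler might emit the new payload as a keepalive — the `sendHeartbeats` helper, its `send` callback, and the tick interval are illustrative assumptions, not code from this series; only the `pbpeerstream` types come from the generated code shown here.

```go
// Sketch only: exercises the generated pbpeerstream types added by this patch.
package example

import (
	"time"

	"github.com/hashicorp/consul/proto/pbpeerstream"
)

// sendHeartbeats emits an empty Heartbeat payload on every tick until stop is
// closed. The send callback stands in for whatever (mutex-protected) helper
// the stream handler uses to write to the gRPC stream.
func sendHeartbeats(send func(*pbpeerstream.ReplicationMessage) error, interval time.Duration, stop <-chan struct{}) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return nil
		case <-ticker.C:
			// Wrap the empty Heartbeat message in the Payload oneof.
			msg := &pbpeerstream.ReplicationMessage{
				Payload: &pbpeerstream.ReplicationMessage_Heartbeat_{
					Heartbeat: &pbpeerstream.ReplicationMessage_Heartbeat{},
				},
			}
			if err := send(msg); err != nil {
				return err
			}
		}
	}
}
```

Because the message carries no fields, receipt alone is enough for the other side to conclude the connection is still active, which is why an empty message suffices here.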
From 77a263ebbbb362605d8f65bfd5f7a0df1b15f5a2 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 20 Jul 2022 14:25:20 -0700 Subject: [PATCH 046/107] Fix duplicate Notify calls for discovery chains in ingress gateways --- agent/proxycfg/upstreams.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go index a47510543..600a89e09 100644 --- a/agent/proxycfg/upstreams.go +++ b/agent/proxycfg/upstreams.go @@ -436,7 +436,17 @@ type discoveryChainWatchOpts struct { } func (s *handlerUpstreams) watchDiscoveryChain(ctx context.Context, snap *ConfigSnapshot, opts discoveryChainWatchOpts) error { - if _, ok := snap.ConnectProxy.WatchedDiscoveryChains[opts.id]; ok { + var watchedDiscoveryChains map[UpstreamID]context.CancelFunc + switch s.kind { + case structs.ServiceKindIngressGateway: + watchedDiscoveryChains = snap.IngressGateway.WatchedDiscoveryChains + case structs.ServiceKindConnectProxy: + watchedDiscoveryChains = snap.ConnectProxy.WatchedDiscoveryChains + default: + return fmt.Errorf("unsupported kind %s", s.kind) + } + + if _, ok := watchedDiscoveryChains[opts.id]; ok { return nil } @@ -457,16 +467,7 @@ func (s *handlerUpstreams) watchDiscoveryChain(ctx context.Context, snap *Config return err } - switch s.kind { - case structs.ServiceKindIngressGateway: - snap.IngressGateway.WatchedDiscoveryChains[opts.id] = cancel - case structs.ServiceKindConnectProxy: - snap.ConnectProxy.WatchedDiscoveryChains[opts.id] = cancel - default: - cancel() - return fmt.Errorf("unsupported kind %s", s.kind) - } - + watchedDiscoveryChains[opts.id] = cancel return nil } From affbb28eb523d3705d465ae9faee5f70c6514132 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 20 Jul 2022 14:26:52 -0700 Subject: [PATCH 047/107] Cancel upstream watches when the discovery chain has been removed --- agent/proxycfg/ingress_gateway.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 3fb67ddab..3889bdba8 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -148,6 +148,16 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, for uid, cancelFn := range snap.IngressGateway.WatchedDiscoveryChains { if _, ok := watchedSvcs[uid]; !ok { + for targetID, cancelUpstreamFn := range snap.IngressGateway.WatchedUpstreams[uid] { + s.logger.Debug("stopping watch of target", + "upstream", uid, + "target", targetID, + ) + delete(snap.IngressGateway.WatchedUpstreams[uid], targetID) + delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID) + cancelUpstreamFn() + } + cancelFn() delete(snap.IngressGateway.WatchedDiscoveryChains, uid) } From 7a58a4df96c2c237aec7404614f538485e8b775b Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Wed, 6 Jul 2022 18:24:03 -0400 Subject: [PATCH 048/107] docs: suggest using token header, not query param --- .../content/docs/agent/config/config-files.mdx | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 4506d80ff..a8eaba6d5 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -869,11 +869,12 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." 
- `master` ((#acl_tokens_master)) **Renamed in Consul 1.11 to [`acl.tokens.initial_management`](#acl_tokens_initial_management).** - - `default` ((#acl_tokens_default)) - When provided, the agent will - use this token when making requests to the Consul servers. Clients can override - this token on a per-request basis by providing the "?token" query parameter. - When not provided, the empty token, which maps to the 'anonymous' ACL token, - is used. + - `default` ((#acl_tokens_default)) - When provided, this agent will + use this token by default when making requests to the Consul servers + instead of the [anonymous token](/docs/security/acl/acl-tokens#anonymous-token). + Consul HTTP API requests can provide an alternate token in their authorization header + to override the `default` or anonymous token on a per-request basis, + as described in [HTTP API Authentication](/api-docs#authentication). - `agent` ((#acl_tokens_agent)) - Used for clients and servers to perform internal operations. If this isn't specified, then the @@ -993,11 +994,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." in the cache can be resolved during the outage using the replicated set of ACLs. - `acl_token` ((#acl_token_legacy)) - **Deprecated in Consul 1.4.0. See - the [`acl.tokens.default`](#acl_tokens_default) field instead.** When provided, - the agent will use this token when making requests to the Consul servers. Clients - can override this token on a per-request basis by providing the "?token" query - parameter. When not provided, the empty token, which maps to the 'anonymous' ACL - policy, is used. + the [`acl.tokens.default`](#acl_tokens_default) field instead.** - `acl_ttl` ((#acl_ttl_legacy)) - **Deprecated in Consul 1.4.0. See the [`acl.token_ttl`](#acl_token_ttl) field instead.**Used to control Time-To-Live From 4cec3bd9db99abc1055262d37741cf3e83695d14 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Wed, 20 Jul 2022 15:48:18 -0700 Subject: [PATCH 049/107] Add send mutex to protect against concurrent sends (#13805) --- .../services/peerstream/stream_resources.go | 40 +++++++++++-------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index c67f7da04..5c69d08a7 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "strings" + "sync" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" @@ -204,6 +205,25 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { ) subCh := mgr.subscribe(streamReq.Stream.Context(), streamReq.LocalID, streamReq.PeerName, streamReq.Partition) + // We need a mutex to protect against simultaneous sends to the client. + var sendMutex sync.Mutex + + // streamSend is a helper function that sends msg over the stream + // respecting the send mutex. It also logs the send and calls status.TrackSendError + // on error. + streamSend := func(msg *pbpeerstream.ReplicationMessage) error { + logTraceSend(logger, msg) + + sendMutex.Lock() + err := streamReq.Stream.Send(msg) + sendMutex.Unlock() + + if err != nil { + status.TrackSendError(err.Error()) + } + return err + } + // Subscribe to all relevant resource types. 
for _, resourceURL := range []string{ pbpeerstream.TypeURLExportedService, @@ -213,16 +233,12 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { ResourceURL: resourceURL, PeerID: streamReq.RemoteID, }) - logTraceSend(logger, sub) - - if err := streamReq.Stream.Send(sub); err != nil { + if err := streamSend(sub); err != nil { if err == io.EOF { logger.Info("stream ended by peer") - status.TrackReceiveError(err.Error()) return nil } // TODO(peering) Test error handling in calls to Send/Recv - status.TrackSendError(err.Error()) return fmt.Errorf("failed to send subscription for %q to stream: %w", resourceURL, err) } } @@ -261,10 +277,7 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { Terminated: &pbpeerstream.ReplicationMessage_Terminated{}, }, } - logTraceSend(logger, term) - - if err := streamReq.Stream.Send(term); err != nil { - status.TrackSendError(err.Error()) + if err := streamSend(term); err != nil { return fmt.Errorf("failed to send to stream: %v", err) } @@ -401,9 +414,7 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { status.TrackReceiveSuccess() } - logTraceSend(logger, reply) - if err := streamReq.Stream.Send(reply); err != nil { - status.TrackSendError(err.Error()) + if err := streamSend(reply); err != nil { return fmt.Errorf("failed to send to stream: %v", err) } @@ -451,10 +462,7 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { } replResp := makeReplicationResponse(resp) - - logTraceSend(logger, replResp) - if err := streamReq.Stream.Send(replResp); err != nil { - status.TrackSendError(err.Error()) + if err := streamSend(replResp); err != nil { return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err) } } From e3bff8fb399d279c1d0e78d3ad6926b645dfa8b4 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Thu, 21 Jul 2022 13:38:28 +0100 Subject: [PATCH 050/107] proxycfg-glue: server-local implementation of `PeeredUpstreams` This is the OSS portion of enterprise PR 2352. It adds a server-local implementation of the proxycfg.PeeredUpstreams interface based on a blocking query against the server's state store. It also fixes an omission in the Virtual IP freeing logic where we were never updating the max index (and therefore blocking queries against VirtualIPsForAllImportedServices would not return on service deletion). 
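As a rough sketch of the blocking-query pattern this data source builds on (illustrative only — blockingFetchSketch, the fetch parameter, and the chosen result type are stand-ins, not the actual implementation): the query only returns once the table's max index moves past the caller's index, which is why the virtual-IP fix in this commit also bumps the index when an entry is freed.

    package proxycfgglue

    import (
        "context"

        "github.com/hashicorp/go-memdb"

        "github.com/hashicorp/consul/agent/structs"
    )

    // blockingFetchSketch is illustrative only: if a delete changes the data but the
    // index returned by fetch never advances past minIndex, this loop keeps blocking
    // and callers never observe the deletion.
    func blockingFetchSketch(
        ctx context.Context,
        minIndex uint64,
        fetch func(memdb.WatchSet) (uint64, []structs.PeeredServiceName, error),
    ) (uint64, []structs.PeeredServiceName, error) {
        for {
            ws := memdb.NewWatchSet()
            idx, results, err := fetch(ws)
            if err != nil {
                return 0, nil, err
            }
            if idx > minIndex {
                return idx, results, nil
            }
            // Wait for a write to touch something in the watch set, or for ctx to end.
            if err := ws.WatchCtx(ctx); err != nil {
                return 0, nil, err
            }
        }
    }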
--- agent/agent.go | 1 + agent/consul/state/catalog.go | 9 +- agent/proxycfg-glue/glue.go | 7 +- agent/proxycfg-glue/peered_upstreams.go | 55 ++++++++++++ agent/proxycfg-glue/peered_upstreams_test.go | 88 ++++++++++++++++++++ 5 files changed, 152 insertions(+), 8 deletions(-) create mode 100644 agent/proxycfg-glue/peered_upstreams.go create mode 100644 agent/proxycfg-glue/peered_upstreams_test.go diff --git a/agent/agent.go b/agent/agent.go index 4b78b69a9..751e62d4b 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4251,6 +4251,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth)) sources.Intentions = proxycfgglue.ServerIntentions(deps) sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) + sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps) sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache)) sources.TrustBundle = proxycfgglue.ServerTrustBundle(deps) sources.TrustBundleList = proxycfgglue.ServerTrustBundleList(deps) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 622cccd35..849d0820c 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -1990,7 +1990,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st } } psn := structs.PeeredServiceName{Peer: svc.PeerName, ServiceName: name} - if err := freeServiceVirtualIP(tx, psn, nil); err != nil { + if err := freeServiceVirtualIP(tx, idx, psn, nil); err != nil { return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err) } if err := cleanupKindServiceName(tx, idx, svc.CompoundServiceName(), svc.ServiceKind); err != nil { @@ -2008,6 +2008,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st // is removed. 
func freeServiceVirtualIP( tx WriteTxn, + idx uint64, psn structs.PeeredServiceName, excludeGateway *structs.ServiceName, ) error { @@ -2059,6 +2060,10 @@ func freeServiceVirtualIP( return fmt.Errorf("failed updating freed virtual IP table: %v", err) } + if err := updateVirtualIPMaxIndexes(tx, idx, psn.ServiceName.PartitionOrDefault(), psn.Peer); err != nil { + return err + } + return nil } @@ -3497,7 +3502,7 @@ func updateTerminatingGatewayVirtualIPs(tx WriteTxn, idx uint64, conf *structs.T } if len(nodes) == 0 { psn := structs.PeeredServiceName{Peer: structs.DefaultPeerKeyword, ServiceName: sn} - if err := freeServiceVirtualIP(tx, psn, &gatewayName); err != nil { + if err := freeServiceVirtualIP(tx, idx, psn, &gatewayName); err != nil { return err } } diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 7c3311f36..1e254f406 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -28,6 +28,7 @@ type Store interface { PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error) PeeringTrustBundleList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error) TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error) + VirtualIPsForAllImportedServices(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []state.ServiceVirtualIP, error) } // CacheCARoots satisfies the proxycfg.CARoots interface by sourcing data from @@ -90,12 +91,6 @@ func CacheLeafCertificate(c *cache.Cache) proxycfg.LeafCertificate { return &cacheProxyDataSource[*cachetype.ConnectCALeafRequest]{c, cachetype.ConnectCALeafName} } -// CachePeeredUpstreams satisfies the proxycfg.PeeredUpstreams interface -// by sourcing data from the agent cache. -func CachePeeredUpstreams(c *cache.Cache) proxycfg.PeeredUpstreams { - return &cacheProxyDataSource[*structs.PartitionSpecificRequest]{c, cachetype.PeeredUpstreamsName} -} - // CachePrepraredQuery satisfies the proxycfg.PreparedQuery interface by // sourcing data from the agent cache. func CachePrepraredQuery(c *cache.Cache) proxycfg.PreparedQuery { diff --git a/agent/proxycfg-glue/peered_upstreams.go b/agent/proxycfg-glue/peered_upstreams.go new file mode 100644 index 000000000..4d3e85f81 --- /dev/null +++ b/agent/proxycfg-glue/peered_upstreams.go @@ -0,0 +1,55 @@ +package proxycfgglue + +import ( + "context" + + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/consul/watch" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" +) + +// CachePeeredUpstreams satisfies the proxycfg.PeeredUpstreams interface +// by sourcing data from the agent cache. +func CachePeeredUpstreams(c *cache.Cache) proxycfg.PeeredUpstreams { + return &cacheProxyDataSource[*structs.PartitionSpecificRequest]{c, cachetype.PeeredUpstreamsName} +} + +// ServerPeeredUpstreams satisfies the proxycfg.PeeredUpstreams interface by +// sourcing data from a blocking query against the server's state store. 
+func ServerPeeredUpstreams(deps ServerDataSourceDeps) proxycfg.PeeredUpstreams { + return &serverPeeredUpstreams{deps} +} + +type serverPeeredUpstreams struct { + deps ServerDataSourceDeps +} + +func (s *serverPeeredUpstreams) Notify(ctx context.Context, req *structs.PartitionSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + // TODO(peering): ACL filtering. + return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedPeeredServiceList, error) { + index, vips, err := store.VirtualIPsForAllImportedServices(ws, req.EnterpriseMeta) + if err != nil { + return 0, nil, err + } + + result := make([]structs.PeeredServiceName, 0, len(vips)) + for _, vip := range vips { + result = append(result, vip.Service) + } + + return index, &structs.IndexedPeeredServiceList{ + Services: result, + QueryMeta: structs.QueryMeta{ + Index: index, + Backend: structs.QueryBackendBlocking, + }, + }, nil + }, + dispatchBlockingQueryUpdate[*structs.IndexedPeeredServiceList](ch), + ) +} diff --git a/agent/proxycfg-glue/peered_upstreams_test.go b/agent/proxycfg-glue/peered_upstreams_test.go new file mode 100644 index 000000000..c2faa44da --- /dev/null +++ b/agent/proxycfg-glue/peered_upstreams_test.go @@ -0,0 +1,88 @@ +package proxycfgglue + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestServerPeeredUpstreams(t *testing.T) { + const ( + index uint64 = 123 + nodeName = "node-1" + ) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := state.NewStateStore(nil) + enableVirtualIPs(t, store) + + registerService := func(t *testing.T, index uint64, peerName, serviceName string) { + require.NoError(t, store.EnsureRegistration(index, &structs.RegisterRequest{ + Node: nodeName, + Service: &structs.NodeService{Service: serviceName, ID: serviceName}, + PeerName: peerName, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + })) + + require.NoError(t, store.EnsureRegistration(index, &structs.RegisterRequest{ + Node: nodeName, + Service: &structs.NodeService{ + Service: fmt.Sprintf("%s-proxy", serviceName), + Kind: structs.ServiceKindConnectProxy, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: serviceName, + }, + }, + PeerName: peerName, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + })) + } + + registerService(t, index, "peer-1", "web") + + eventCh := make(chan proxycfg.UpdateEvent) + dataSource := ServerPeeredUpstreams(ServerDataSourceDeps{ + GetStore: func() Store { return store }, + }) + require.NoError(t, dataSource.Notify(ctx, &structs.PartitionSpecificRequest{EnterpriseMeta: *acl.DefaultEnterpriseMeta()}, "", eventCh)) + + testutil.RunStep(t, "initial state", func(t *testing.T) { + result := getEventResult[*structs.IndexedPeeredServiceList](t, eventCh) + require.Len(t, result.Services, 1) + require.Equal(t, "peer-1", result.Services[0].Peer) + require.Equal(t, "web", result.Services[0].ServiceName.Name) + }) + + testutil.RunStep(t, "register another service", func(t *testing.T) { + registerService(t, index+1, "peer-2", "db") + + result := getEventResult[*structs.IndexedPeeredServiceList](t, eventCh) + require.Len(t, result.Services, 2) + }) + + testutil.RunStep(t, "deregister service", func(t 
*testing.T) { + require.NoError(t, store.DeleteService(index+2, nodeName, "web", acl.DefaultEnterpriseMeta(), "peer-1")) + + result := getEventResult[*structs.IndexedPeeredServiceList](t, eventCh) + require.Len(t, result.Services, 1) + }) +} + +func enableVirtualIPs(t *testing.T, store *state.Store) { + t.Helper() + + require.NoError(t, store.SystemMetadataSet(0, &structs.SystemMetadataEntry{ + Key: structs.SystemMetadataVirtualIPsEnabled, + Value: "true", + })) +} From 1475ec0349f089aea02f0ae77f177055e76f8e33 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Thu, 21 Jul 2022 15:59:38 +0200 Subject: [PATCH 051/107] ui: Update peerings empty state copy (#13834) --- .../consul-peerings/app/templates/dc/peers/index.hbs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ui/packages/consul-peerings/app/templates/dc/peers/index.hbs b/ui/packages/consul-peerings/app/templates/dc/peers/index.hbs index c9fd41e36..d0d34b1ce 100644 --- a/ui/packages/consul-peerings/app/templates/dc/peers/index.hbs +++ b/ui/packages/consul-peerings/app/templates/dc/peers/index.hbs @@ -115,12 +115,11 @@ as |sort filters items|}} {{#if (gt items.length 0)}} - No peers where found matching that search, or you may not have access to view the peers you are searching for. +

      No peers were found matching that search, or you may not have access to view the peers you are searching for.

      {{else}} - Peering allows an admin partition in one datacenter to communicate with a partition in a different - datacenter. There don't seem to be any peers for this admin partition, or you may not have - peering:read permissions to - access this view. +

      + Cluster peering is the recommended way to connect services across or within Consul datacenters. Peering is a one-to-one relationship in which each peer is either an open-source Consul datacenter or a Consul enterprise admin partition. There don't seem to be any peers for this {{if (can "use partitions") "admin partition" "datacenter"}}, or you may not have the peering:read permissions to access this view. +

      {{/if}}
      From 7863a00e2c3480bb1b88a628efe97e3d5190bfc6 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Thu, 21 Jul 2022 16:35:54 +0200 Subject: [PATCH 052/107] ui: Surface peer info in nodes.show view (#13832) --- .../consul/{service/peer-info => peer/info}/index.hbs | 6 +++--- .../consul/{service/peer-info => peer/info}/index.scss | 4 ++-- ui/packages/consul-ui/app/styles/components.scss | 2 +- ui/packages/consul-ui/app/templates/dc/nodes/show.hbs | 1 + ui/packages/consul-ui/app/templates/dc/services/show.hbs | 2 +- ui/packages/consul-ui/tests/pages/dc/services/show.js | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) rename ui/packages/consul-ui/app/components/consul/{service/peer-info => peer/info}/index.hbs (86%) rename ui/packages/consul-ui/app/components/consul/{service/peer-info => peer/info}/index.scss (71%) diff --git a/ui/packages/consul-ui/app/components/consul/service/peer-info/index.hbs b/ui/packages/consul-ui/app/components/consul/peer/info/index.hbs similarity index 86% rename from ui/packages/consul-ui/app/components/consul/service/peer-info/index.hbs rename to ui/packages/consul-ui/app/components/consul/peer/info/index.hbs index 5ed3fd4e8..25fbb2180 100644 --- a/ui/packages/consul-ui/app/components/consul/service/peer-info/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/peer/info/index.hbs @@ -1,5 +1,5 @@ -{{#if @service.PeerName}} -
      +{{#if @item.PeerName}} +
      - Imported from {{@service.PeerName}} + Imported from {{@item.PeerName}}
      {{/if}} diff --git a/ui/packages/consul-ui/app/components/consul/service/peer-info/index.scss b/ui/packages/consul-ui/app/components/consul/peer/info/index.scss similarity index 71% rename from ui/packages/consul-ui/app/components/consul/service/peer-info/index.scss rename to ui/packages/consul-ui/app/components/consul/peer/info/index.scss index 88be476fa..a9d57b163 100644 --- a/ui/packages/consul-ui/app/components/consul/service/peer-info/index.scss +++ b/ui/packages/consul-ui/app/components/consul/peer/info/index.scss @@ -1,4 +1,4 @@ -.consul-service-peer-info { +.consul-peer-info { background: rgb(var(--gray-100)); color: rgb(var(--gray-600)); padding: 0px 8px; @@ -7,7 +7,7 @@ display: flex; align-items: center; - .consul-service-peer-info__description { + .consul-peer-info__description { margin-left: 4px; } } diff --git a/ui/packages/consul-ui/app/styles/components.scss b/ui/packages/consul-ui/app/styles/components.scss index e6dca0229..f94f14d44 100644 --- a/ui/packages/consul-ui/app/styles/components.scss +++ b/ui/packages/consul-ui/app/styles/components.scss @@ -107,5 +107,5 @@ @import 'consul-ui/components/consul/peer'; @import 'consul-ui/components/peerings/badge'; @import 'consul-ui/components/consul/node/peer-info'; -@import 'consul-ui/components/consul/service/peer-info'; +@import 'consul-ui/components/consul/peer/info'; @import 'consul-ui/components/consul/peer/form'; diff --git a/ui/packages/consul-ui/app/templates/dc/nodes/show.hbs b/ui/packages/consul-ui/app/templates/dc/nodes/show.hbs index 47459c399..ec7f46493 100644 --- a/ui/packages/consul-ui/app/templates/dc/nodes/show.hbs +++ b/ui/packages/consul-ui/app/templates/dc/nodes/show.hbs @@ -103,6 +103,7 @@ as |item tomography|}} + - + {{#if (not-eq item.Service.Kind 'mesh-gateway')}} diff --git a/ui/packages/consul-ui/tests/pages/dc/services/show.js b/ui/packages/consul-ui/tests/pages/dc/services/show.js index f0426a18a..a1bc3ae94 100644 --- a/ui/packages/consul-ui/tests/pages/dc/services/show.js +++ b/ui/packages/consul-ui/tests/pages/dc/services/show.js @@ -19,7 +19,7 @@ export default function( metricsAnchor: { href: attribute('href', '[data-test-metrics-anchor]'), }, - peer: text('[data-test-service-peer-info] [data-test-peer-name]'), + peer: text('[data-test-peer-info] [data-test-peer-name]'), tabs: tabs('tab', [ 'topology', 'instances', From fdddf7af4801f76ee029bd506330e803ee1a6c64 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Thu, 21 Jul 2022 17:09:54 +0200 Subject: [PATCH 053/107] ui: peered services only show instance- and tags-tabs (#13840) * Only show instances- and tags-tab peered services * Adapt show-with-slashes test to peering changes Tests always have the peering feature turned on and the default service we load from the mock-api will be peered. This is why the topology view of the service.show page will not be accessible in the updated test it will show the instances instead. This change does not change what the test is actually testing so just putting changing to the now different url is fine. 
--- .../consul-ui/app/templates/dc/services/show.hbs | 10 +++++----- .../acceptance/dc/services/show-with-slashes.feature | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ui/packages/consul-ui/app/templates/dc/services/show.hbs b/ui/packages/consul-ui/app/templates/dc/services/show.hbs index 8fa15b308..e8c0044e6 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show.hbs @@ -117,12 +117,12 @@ as |items item dc|}} {{/if}} {{#let (hash - topology=(and dc.MeshEnabled item.IsMeshOrigin (or (gt proxies.length 0) (eq item.Service.Kind 'ingress-gateway'))) - services=(eq item.Service.Kind 'terminating-gateway') - upstreams=(eq item.Service.Kind 'ingress-gateway') + topology=(and dc.MeshEnabled item.IsMeshOrigin (or (gt proxies.length 0) (eq item.Service.Kind 'ingress-gateway')) (not item.Service.PeerName)) + services=(and (eq item.Service.Kind 'terminating-gateway') (not item.Service.PeerName)) + upstreams=(and (eq item.Service.Kind 'ingress-gateway') (not item.Service.PeerName)) instances=true - intentions=(and (not-eq item.Service.Kind 'terminating-gateway') (can 'read intention for service' item=item.Service)) - routing=(and dc.MeshEnabled item.IsOrigin) + intentions=(and (not-eq item.Service.Kind 'terminating-gateway') (can 'read intention for service' item=item.Service) (not item.Service.PeerName)) + routing=(and dc.MeshEnabled item.IsOrigin (not item.Service.PeerName)) tags=true ) as |tabs|}} diff --git a/ui/packages/consul-ui/tests/acceptance/dc/services/show-with-slashes.feature b/ui/packages/consul-ui/tests/acceptance/dc/services/show-with-slashes.feature index 88d8c111c..73c220e5d 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/services/show-with-slashes.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/services/show-with-slashes.feature @@ -17,5 +17,5 @@ Feature: dc / services / show-with-slashes: Show Service that has slashes in its Then the url should be /dc1/services Then I see 1 service model And I click service on the services - Then the url should be /:billing/dc1/services/hashicorp%2Fservice%2Fservice-0/topology + Then the url should be /:billing/dc1/services/hashicorp%2Fservice%2Fservice-0/instances From a7b8f7738bc276b4097e7047c9e0fe8eef9afb49 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 21 Jul 2022 17:38:10 +0100 Subject: [PATCH 054/107] ui: Remove peering detail page (#13836) * ui: Remove links to the peering detail page * 404 everything --- .../app/components/consul/peer/list/index.hbs | 24 +++---------------- .../vendor/consul-peerings/routes.js | 10 -------- 2 files changed, 3 insertions(+), 31 deletions(-) diff --git a/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs index 249c5a976..30272119e 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs +++ b/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs @@ -2,21 +2,11 @@ class="consul-peer-list" ...attributes @items={{@items}} - @linkable="linkable peer" as |item index|> -{{#if (can 'delete peer' item=item)}} - - {{item.Name}} - -{{else}} -

      - {{item.Name}} -

      -{{/if}} +

      + {{item.Name}} +

      @@ -52,14 +42,6 @@ as |item index|> {{#if (can 'delete peer' item=item)}} - - - View - - Date: Thu, 21 Jul 2022 17:38:57 +0100 Subject: [PATCH 055/107] ui: Allow searching for peerings by ID (#13837) --- ui/packages/consul-peerings/vendor/consul-peerings/routes.js | 2 +- ui/packages/consul-ui/app/models/peer.js | 1 + ui/packages/consul-ui/app/search/predicates/peer.js | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ui/packages/consul-peerings/vendor/consul-peerings/routes.js b/ui/packages/consul-peerings/vendor/consul-peerings/routes.js index 89709bb4b..fd2a92aca 100644 --- a/ui/packages/consul-peerings/vendor/consul-peerings/routes.js +++ b/ui/packages/consul-peerings/vendor/consul-peerings/routes.js @@ -12,7 +12,7 @@ state: 'state', searchproperty: { as: 'searchproperty', - empty: [['Name']], + empty: [['Name', 'ID']], }, search: { as: 'filter', diff --git a/ui/packages/consul-ui/app/models/peer.js b/ui/packages/consul-ui/app/models/peer.js index 5b2c6a01a..cd050524b 100644 --- a/ui/packages/consul-ui/app/models/peer.js +++ b/ui/packages/consul-ui/app/models/peer.js @@ -22,6 +22,7 @@ export default class Peer extends Model { @attr('string') Name; @attr('string') State; + @attr('string') ID; @attr('number') ImportedServiceCount; @attr('number') ExportedServiceCount; @attr() PeerServerAddresses; diff --git a/ui/packages/consul-ui/app/search/predicates/peer.js b/ui/packages/consul-ui/app/search/predicates/peer.js index e97c2d2d1..5f1606a8e 100644 --- a/ui/packages/consul-ui/app/search/predicates/peer.js +++ b/ui/packages/consul-ui/app/search/predicates/peer.js @@ -1,3 +1,4 @@ export default { Name: item => item.Name, + ID: item => item.ID, }; From 2875cbe8562d91bc2a2921de89ae50f2cfa46845 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 21 Jul 2022 17:39:15 +0100 Subject: [PATCH 056/107] ui: Change initiate > establish for peering the modal tab (#13839) --- .../consul-peerings/app/components/consul/peer/form/index.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs index 374405b4a..4a9971a97 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs +++ b/ui/packages/consul-peerings/app/components/consul/peer/form/index.hbs @@ -16,7 +16,7 @@ state="GENERATE" ) (hash - label='Initiate peering' + label='Establish peering' selected=(state-matches fsm.state 'initiate') state="INITIATE" ) From 00f9dc2a7039cb176901d9bc52ba0620299ce3c3 Mon Sep 17 00:00:00 2001 From: Chris Thain <32781396+cthain@users.noreply.github.com> Date: Thu, 21 Jul 2022 09:54:56 -0700 Subject: [PATCH 057/107] Add Consul Lambda integration tests (#13770) --- .circleci/config.yml | 5 ++ .../envoy/case-mesh-to-lambda/capture.sh | 4 ++ .../case-mesh-to-lambda/config_entries.hcl | 12 +++++ .../envoy/case-mesh-to-lambda/lambda_l1.json | 12 +++++ .../envoy/case-mesh-to-lambda/lambda_l2.json | 12 +++++ .../envoy/case-mesh-to-lambda/serverless.hcl | 3 ++ .../service_defaults_l1.json | 11 ++++ .../service_defaults_l2.json | 11 ++++ .../case-mesh-to-lambda/service_gateway.hcl | 5 ++ .../envoy/case-mesh-to-lambda/service_s1.hcl | 20 +++++++ .../envoy/case-mesh-to-lambda/setup.sh | 19 +++++++ .../connect/envoy/case-mesh-to-lambda/vars.sh | 15 ++++++ .../envoy/case-mesh-to-lambda/verify.bats | 39 ++++++++++++++ test/integration/connect/envoy/helpers.bash | 52 +++++++++++++++++++ test/integration/connect/envoy/run-tests.sh | 17 
++++++ 15 files changed, 237 insertions(+) create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/capture.sh create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/lambda_l1.json create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/lambda_l2.json create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/serverless.hcl create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l1.json create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l2.json create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/service_gateway.hcl create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/service_s1.hcl create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/setup.sh create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/vars.sh create mode 100644 test/integration/connect/envoy/case-mesh-to-lambda/verify.bats diff --git a/.circleci/config.yml b/.circleci/config.yml index de9620486..af1a2f5c6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -876,8 +876,13 @@ jobs: environment: ENVOY_VERSION: << parameters.envoy-version >> XDS_TARGET: << parameters.xds-target >> + AWS_LAMBDA_REGION: us-west-2 steps: &ENVOY_INTEGRATION_TEST_STEPS - checkout + - assume-role: + access-key: AWS_ACCESS_KEY_ID_LAMBDA + secret-key: AWS_SECRET_ACCESS_KEY_LAMBDA + role-arn: ROLE_ARN_LAMBDA # Get go binary from workspace - attach_workspace: at: . diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/capture.sh b/test/integration/connect/envoy/case-mesh-to-lambda/capture.sh new file mode 100644 index 000000000..19ddf49f6 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/capture.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +snapshot_envoy_admin localhost:19000 s1 primary || true +snapshot_envoy_admin localhost:20000 terminating-gateway primary || true diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl b/test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl new file mode 100644 index 000000000..2cf6a2e28 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/config_entries.hcl @@ -0,0 +1,12 @@ +config_entries { + bootstrap { + kind = "terminating-gateway" + name = "terminating-gateway" + + services = [ + { + name = "l2" + } + ] + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/lambda_l1.json b/test/integration/connect/envoy/case-mesh-to-lambda/lambda_l1.json new file mode 100644 index 000000000..f5ef7cd50 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/lambda_l1.json @@ -0,0 +1,12 @@ +{ + "Node": "lambdas", + "SkipNodeUpdate": true, + "NodeMeta": { + "external-node": "true", + "external-probe": "true" + }, + "Service": { + "ID": "l1", + "Service": "l1" + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/lambda_l2.json b/test/integration/connect/envoy/case-mesh-to-lambda/lambda_l2.json new file mode 100644 index 000000000..e4997bd63 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/lambda_l2.json @@ -0,0 +1,12 @@ +{ + "Node": "lambdas", + "SkipNodeUpdate": true, + "NodeMeta": { + "external-node": "true", + "external-probe": "true" + }, + "Service": { + "ID": "l2", + "Service": "l2" + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/serverless.hcl 
b/test/integration/connect/envoy/case-mesh-to-lambda/serverless.hcl new file mode 100644 index 000000000..41447a466 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/serverless.hcl @@ -0,0 +1,3 @@ +connect { + enable_serverless_plugin = true +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l1.json b/test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l1.json new file mode 100644 index 000000000..2d67c3558 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l1.json @@ -0,0 +1,11 @@ +{ + "Kind": "service-defaults", + "Name": "l1", + "Protocol": "http", + "Meta": { + "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/region": "${AWS_LAMBDA_REGION}", + "serverless.consul.hashicorp.com/v1alpha1/lambda/arn": "${AWS_LAMBDA_ARN}", + "serverless.consul.hashicorp.com/v1alpha1/lambda/payload-passthrough": "true" + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l2.json b/test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l2.json new file mode 100644 index 000000000..263126060 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/service_defaults_l2.json @@ -0,0 +1,11 @@ +{ + "Kind": "service-defaults", + "Name": "l2", + "Protocol": "http", + "Meta": { + "serverless.consul.hashicorp.com/v1alpha1/lambda/enabled": "true", + "serverless.consul.hashicorp.com/v1alpha1/lambda/region": "${AWS_LAMBDA_REGION}", + "serverless.consul.hashicorp.com/v1alpha1/lambda/arn": "${AWS_LAMBDA_ARN}", + "serverless.consul.hashicorp.com/v1alpha1/lambda/payload-passthrough": "false" + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/service_gateway.hcl b/test/integration/connect/envoy/case-mesh-to-lambda/service_gateway.hcl new file mode 100644 index 000000000..0958221ed --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/service_gateway.hcl @@ -0,0 +1,5 @@ +services { + name = "terminating-gateway" + kind = "terminating-gateway" + port = 8443 +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/service_s1.hcl b/test/integration/connect/envoy/case-mesh-to-lambda/service_s1.hcl new file mode 100644 index 000000000..36217f42d --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/service_s1.hcl @@ -0,0 +1,20 @@ +services { + name = "s1" + port = 8080 + connect { + sidecar_service { + proxy { + upstreams = [ + { + destination_name = "l1" + local_bind_port = 1234 + }, + { + destination_name = "l2" + local_bind_port = 5678 + } + ] + } + } + } +} diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh b/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh new file mode 100644 index 000000000..c187c8df2 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/setup.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -eEuo pipefail + +# Copy lambda config files into the register dir +find ${CASE_DIR} -maxdepth 1 -name '*_l*.json' -type f -exec cp -f {} workdir/${CLUSTER}/register \; + +# wait for tgw config entry +wait_for_config_entry terminating-gateway terminating-gateway + +register_services primary +register_lambdas primary + +# wait for Lambda config entries +wait_for_config_entry service-defaults l1 +wait_for_config_entry service-defaults l2 + +gen_envoy_bootstrap s1 19000 primary +gen_envoy_bootstrap terminating-gateway 20000 primary true diff --git 
a/test/integration/connect/envoy/case-mesh-to-lambda/vars.sh b/test/integration/connect/envoy/case-mesh-to-lambda/vars.sh new file mode 100644 index 000000000..fa47ec4ae --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/vars.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Ensure that the environment variables required to configure and invoke the Lambda function are present, otherwise skip. +# Note that `set | grep ...` is used here because we cannot check the vars directly. If they are unbound the test will +# fail instead of being skipped. +export SKIP_CASE="" +[ -n "$(set | grep '^AWS_LAMBDA_REGION=')" ] || export SKIP_CASE="AWS_LAMBDA_REGION is not present in the environment" +[ -n "$(set | grep '^AWS_LAMBDA_ARN=')" ] || export SKIP_CASE="AWS_LAMBDA_ARN is not present in the environment" +[ -n "$(set | grep '^AWS_SESSION_TOKEN=')" ] || export SKIP_CASE="AWS_SESSION_TOKEN is not present in the environment" +[ -n "$(set | grep '^AWS_SECRET_ACCESS_KEY=')" ] || export SKIP_CASE="AWS_SECRET_ACCESS_KEY is not present in the environment" +[ -n "$(set | grep '^AWS_ACCESS_KEY_ID=')" ] || export SKIP_CASE="AWS_ACCESS_KEY_ID is not present in the environment" + +[ -n "$SKIP_CASE" ] && return 0 + +export REQUIRED_SERVICES="s1 s1-sidecar-proxy terminating-gateway-primary" diff --git a/test/integration/connect/envoy/case-mesh-to-lambda/verify.bats b/test/integration/connect/envoy/case-mesh-to-lambda/verify.bats new file mode 100644 index 000000000..6a88e83f3 --- /dev/null +++ b/test/integration/connect/envoy/case-mesh-to-lambda/verify.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats + +load helpers + +@test "s1 has lambda cluster for l1" { + assert_lambda_envoy_dynamic_cluster_exists localhost:19000 l1 +} + +@test "s1 has lambda http filter for l1" { + assert_lambda_envoy_dynamic_http_filter_exists localhost:19000 l1 $AWS_LAMBDA_ARN +} + +@test "terminating gateway has lambda cluster for l2" { + assert_lambda_envoy_dynamic_cluster_exists localhost:20000 l2 +} + +@test "terminating gateway has lambda http filter for l2" { + assert_lambda_envoy_dynamic_http_filter_exists localhost:20000 l2 $AWS_LAMBDA_ARN +} + +@test "s1 can call l1 through its sidecar-proxy" { + run retry_default curl -s -f -H "Content-type: application/json" -d '"hello"' 'localhost:1234' + [ "$status" -eq 0 ] + + # l1 is configured with payload_passthrough = true so the response needs to be unwrapped + [ $(echo "$output" | jq -r '.statusCode') -eq 200 ] + [ $(echo "$output" | jq -r '.body') == "hello" ] +} + +@test "s1 can call l2 through the terminating gateway" { + run retry_default curl -s -f -H "Content-type: application/json" -d '"hello"' 'localhost:5678' + [ "$status" -eq 0 ] + [ "$output" == '"hello"' ] + + # Omitting the Content-type in the request will cause envoy to base64 encode the request. + run curl -s -f -d '{"message":"hello"}' 'localhost:5678' + [ "$status" -eq 0 ] + [ "$output" == '{"message":"hello"}' ] +} diff --git a/test/integration/connect/envoy/helpers.bash b/test/integration/connect/envoy/helpers.bash index 7ac897c74..2fd9be7e3 100755 --- a/test/integration/connect/envoy/helpers.bash +++ b/test/integration/connect/envoy/helpers.bash @@ -966,3 +966,55 @@ function create_peering { # echo "$output" >&3 [ "$status" == 0 ] } + +function get_lambda_envoy_http_filter { + local HOSTPORT=$1 + local NAME_PREFIX=$2 + run retry_default curl -s -f $HOSTPORT/config_dump + [ "$status" -eq 0 ] + # get the full http filter object so the individual fields can be validated. 
+ echo "$output" | jq --raw-output ".configs[2].dynamic_listeners[] | .active_state.listener.filter_chains[].filters[] | select(.name == \"envoy.filters.network.http_connection_manager\") | .typed_config.http_filters[] | select(.name == \"envoy.filters.http.aws_lambda\") | .typed_config" +} + +function register_lambdas { + local DC=${1:-primary} + # register lambdas to the catalog + for f in $(find workdir/${DC}/register -type f -name 'lambda_*.json'); do + retry_default curl -sL -XPUT -d @${f} "http://localhost:8500/v1/catalog/register" >/dev/null && \ + echo "Registered Lambda: $(jq -r .Service.Service $f)" + done + # write service-defaults config entries for lambdas + for f in $(find workdir/${DC}/register -type f -name 'service_defaults_*.json'); do + varsub ${f} AWS_LAMBDA_REGION AWS_LAMBDA_ARN + retry_default curl -sL -XPUT -d @${f} "http://localhost:8500/v1/config" >/dev/null && \ + echo "Wrote config: $(jq -r '.Kind + " / " + .Name' $f)" + done +} + +function assert_lambda_envoy_dynamic_cluster_exists { + local HOSTPORT=$1 + local NAME_PREFIX=$2 + + local BODY=$(get_envoy_dynamic_cluster_once $HOSTPORT $NAME_PREFIX) + [ -n "$BODY" ] + + [ "$(echo $BODY | jq -r '.cluster.transport_socket.typed_config.sni')" == '*.amazonaws.com' ] +} + +function assert_lambda_envoy_dynamic_http_filter_exists { + local HOSTPORT=$1 + local NAME_PREFIX=$2 + local ARN=$3 + + local FILTER=$(get_lambda_envoy_http_filter $HOSTPORT $NAME_PREFIX) + [ -n "$FILTER" ] + + [ "$(echo $FILTER | jq -r '.arn')" == "$ARN" ] +} + +function varsub { + local file=$1 ; shift + for v in "$@"; do + sed -i "s/\${$v}/${!v}/g" $file + done +} diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index fc885f9a1..cc678b503 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -43,6 +43,20 @@ function network_snippet { echo "--net container:envoy_consul-${DC}_1" } +function aws_snippet { + local snippet="" + + # The Lambda integration cases assume that a Lambda function exists in $AWS_REGION with an ARN of $AWS_LAMBDA_ARN. + # The AWS credentials must have permission to invoke the Lambda function. 
+ [ -n "$(set | grep '^AWS_ACCESS_KEY_ID=')" ] && snippet="${snippet} -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" + [ -n "$(set | grep '^AWS_SECRET_ACCESS_KEY=')" ] && snippet="${snippet} -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" + [ -n "$(set | grep '^AWS_SESSION_TOKEN=')" ] && snippet="${snippet} -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN" + [ -n "$(set | grep '^AWS_LAMBDA_REGION=')" ] && snippet="${snippet} -e AWS_LAMBDA_REGION=$AWS_LAMBDA_REGION" + [ -n "$(set | grep '^AWS_LAMBDA_ARN=')" ] && snippet="${snippet} -e AWS_LAMBDA_ARN=$AWS_LAMBDA_ARN" + + echo "$snippet" +} + function init_workdir { local CLUSTER="$1" @@ -333,6 +347,7 @@ function verify { $WORKDIR_SNIPPET \ --pid=host \ $(network_snippet $CLUSTER) \ + $(aws_snippet) \ bats-verify \ --pretty /workdir/${CLUSTER}/bats ; then echogreen "✓ PASS" @@ -679,6 +694,7 @@ function common_run_container_sidecar_proxy { docker run -d --name $(container_name_prev) \ $WORKDIR_SNIPPET \ $(network_snippet $CLUSTER) \ + $(aws_snippet) \ "${HASHICORP_DOCKER_PROXY}/envoyproxy/envoy:v${ENVOY_VERSION}" \ envoy \ -c /workdir/${CLUSTER}/envoy/${service}-bootstrap.json \ @@ -765,6 +781,7 @@ function common_run_container_gateway { docker run -d --name $(container_name_prev) \ $WORKDIR_SNIPPET \ $(network_snippet $DC) \ + $(aws_snippet) \ "${HASHICORP_DOCKER_PROXY}/envoyproxy/envoy:v${ENVOY_VERSION}" \ envoy \ -c /workdir/${DC}/envoy/${name}-bootstrap.json \ From ba7f3fbebcd2a0f6c2edd5a59472a8c2265129d4 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Thu, 21 Jul 2022 10:03:27 -0700 Subject: [PATCH 058/107] peering: Add heartbeating to peering streams (#13806) * Add heartbeating to peering streams --- .../services/peerstream/server.go | 19 ++ .../services/peerstream/stream_resources.go | 57 ++++++ .../services/peerstream/stream_test.go | 192 +++++++++++++++++- 3 files changed, 267 insertions(+), 1 deletion(-) diff --git a/agent/grpc-external/services/peerstream/server.go b/agent/grpc-external/services/peerstream/server.go index a71c30d31..96694d63e 100644 --- a/agent/grpc-external/services/peerstream/server.go +++ b/agent/grpc-external/services/peerstream/server.go @@ -1,6 +1,8 @@ package peerstream import ( + "time" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "google.golang.org/grpc" @@ -17,6 +19,11 @@ import ( // TODO(peering): fix up these interfaces to be more testable now that they are // extracted from private peering +const ( + defaultOutgoingHeartbeatInterval = 15 * time.Second + defaultIncomingHeartbeatTimeout = 2 * time.Minute +) + type Server struct { Config } @@ -30,6 +37,12 @@ type Config struct { // Datacenter of the Consul server this gRPC server is hosted on Datacenter string ConnectEnabled bool + + // outgoingHeartbeatInterval is how often we send a heartbeat. + outgoingHeartbeatInterval time.Duration + + // incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. 
+ incomingHeartbeatTimeout time.Duration } //go:generate mockery --name ACLResolver --inpackage @@ -46,6 +59,12 @@ func NewServer(cfg Config) *Server { if cfg.Datacenter == "" { panic("Datacenter is required") } + if cfg.outgoingHeartbeatInterval == 0 { + cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval + } + if cfg.incomingHeartbeatTimeout == 0 { + cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout + } return &Server{ Config: cfg, } diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 5c69d08a7..3d10cdfa0 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -6,6 +6,7 @@ import ( "io" "strings" "sync" + "time" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" @@ -266,6 +267,40 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { } }() + // Heartbeat sender. + go func() { + tick := time.NewTicker(s.outgoingHeartbeatInterval) + defer tick.Stop() + + for { + select { + case <-streamReq.Stream.Context().Done(): + return + + case <-tick.C: + } + + heartbeat := &pbpeerstream.ReplicationMessage{ + Payload: &pbpeerstream.ReplicationMessage_Heartbeat_{ + Heartbeat: &pbpeerstream.ReplicationMessage_Heartbeat{}, + }, + } + if err := streamSend(heartbeat); err != nil { + logger.Warn("error sending heartbeat", "err", err) + } + } + }() + + // incomingHeartbeatCtx will complete if incoming heartbeats time out. + incomingHeartbeatCtx, incomingHeartbeatCtxCancel := + context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) + // NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're + // re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned + // value, not the current value. + defer func() { + incomingHeartbeatCtxCancel() + }() + for { select { // When the doneCh is closed that means that the peering was deleted locally. @@ -278,6 +313,9 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { }, } if err := streamSend(term); err != nil { + // Nolint directive needed due to bug in govet that doesn't see that the cancel + // func of the incomingHeartbeatTimer _does_ get called. + //nolint:govet return fmt.Errorf("failed to send to stream: %v", err) } @@ -286,6 +324,11 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { return nil + // We haven't received a heartbeat within the expected interval. Kill the stream. + case <-incomingHeartbeatCtx.Done(): + logger.Error("ending stream due to heartbeat timeout") + return fmt.Errorf("heartbeat timeout") + case msg, open := <-recvChan: if !open { // The only time we expect the stream to end is when we've received a "Terminated" message. @@ -431,6 +474,20 @@ func (s *Server) HandleStream(streamReq HandleStreamRequest) error { return nil } + if msg.GetHeartbeat() != nil { + // Reset the heartbeat timeout by creating a new context. + // We first must cancel the old context so there's no leaks. This is safe to do because we're only + // reading that context within this for{} loop, and so we won't accidentally trigger the heartbeat + // timeout. + incomingHeartbeatCtxCancel() + // NOTE: IDEs and govet think that the reassigned cancel below never gets + // called, but it does by the defer when the heartbeat ctx is first created. 
+ // They just can't trace the execution properly for some reason (possibly golang/go#29587). + //nolint:govet + incomingHeartbeatCtx, incomingHeartbeatCtxCancel = + context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) + } + case update := <-subCh: var resp *pbpeerstream.ReplicationMessage_Response switch { diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index d5f9e2c36..41c24dc1b 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -272,7 +272,6 @@ func TestStreamResources_Server_FirstRequest(t *testing.T) { run(t, tc) }) } - } func TestStreamResources_Server_Terminate(t *testing.T) { @@ -869,6 +868,197 @@ func TestStreamResources_Server_CARootUpdates(t *testing.T) { }) } +// Test that when the client doesn't send a heartbeat in time, the stream is terminated. +func TestStreamResources_Server_TerminatesOnHeartbeatTimeout(t *testing.T) { + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + + srv, store := newTestServer(t, func(c *Config) { + c.Tracker.SetClock(it.Now) + c.incomingHeartbeatTimeout = 5 * time.Millisecond + }) + + p := writePeeringToBeDialed(t, store, 1, "my-peer") + require.Empty(t, p.PeerID, "should be empty if being dialed") + peerID := p.ID + + // Set the initial roots and CA configuration. + _, _ = writeInitialRootsAndCA(t, store) + + client := makeClient(t, srv, peerID) + + // TODO(peering): test fails if we don't drain the stream with this call because the + // server gets blocked sending the termination message. Figure out a way to let + // messages queue and filter replication messages. + receiveRoots, err := client.Recv() + require.NoError(t, err) + require.NotNil(t, receiveRoots.GetResponse()) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, receiveRoots.GetResponse().ResourceURL) + + testutil.RunStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + testutil.RunStep(t, "stream is disconnected due to heartbeat timeout", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.False(r, status.Connected) + }) + }) +} + +// Test that the server sends heartbeats at the expected interval. +func TestStreamResources_Server_SendsHeartbeats(t *testing.T) { + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + outgoingHeartbeatInterval := 5 * time.Millisecond + + srv, store := newTestServer(t, func(c *Config) { + c.Tracker.SetClock(it.Now) + c.outgoingHeartbeatInterval = outgoingHeartbeatInterval + }) + + p := writePeeringToBeDialed(t, store, 1, "my-peer") + require.Empty(t, p.PeerID, "should be empty if being dialed") + peerID := p.ID + + // Set the initial roots and CA configuration. + _, _ = writeInitialRootsAndCA(t, store) + + client := makeClient(t, srv, peerID) + + // TODO(peering): test fails if we don't drain the stream with this call because the + // server gets blocked sending the termination message. Figure out a way to let + // messages queue and filter replication messages. 
+ receiveRoots, err := client.Recv() + require.NoError(t, err) + require.NotNil(t, receiveRoots.GetResponse()) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, receiveRoots.GetResponse().ResourceURL) + + testutil.RunStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + testutil.RunStep(t, "sends first heartbeat", func(t *testing.T) { + retry.RunWith(&retry.Timer{ + Timeout: outgoingHeartbeatInterval * 2, + Wait: outgoingHeartbeatInterval / 2, + }, t, func(r *retry.R) { + heartbeat, err := client.Recv() + require.NoError(t, err) + require.NotNil(t, heartbeat.GetHeartbeat()) + }) + }) + + testutil.RunStep(t, "sends second heartbeat", func(t *testing.T) { + retry.RunWith(&retry.Timer{ + Timeout: outgoingHeartbeatInterval * 2, + Wait: outgoingHeartbeatInterval / 2, + }, t, func(r *retry.R) { + heartbeat, err := client.Recv() + require.NoError(t, err) + require.NotNil(t, heartbeat.GetHeartbeat()) + }) + }) +} + +// Test that as long as the server receives heartbeats it keeps the connection open. +func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) { + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + incomingHeartbeatTimeout := 10 * time.Millisecond + + srv, store := newTestServer(t, func(c *Config) { + c.Tracker.SetClock(it.Now) + c.incomingHeartbeatTimeout = incomingHeartbeatTimeout + }) + + p := writePeeringToBeDialed(t, store, 1, "my-peer") + require.Empty(t, p.PeerID, "should be empty if being dialed") + peerID := p.ID + + // Set the initial roots and CA configuration. + _, _ = writeInitialRootsAndCA(t, store) + + client := makeClient(t, srv, peerID) + + // TODO(peering): test fails if we don't drain the stream with this call because the + // server gets blocked sending the termination message. Figure out a way to let + // messages queue and filter replication messages. + receiveRoots, err := client.Recv() + require.NoError(t, err) + require.NotNil(t, receiveRoots.GetResponse()) + require.Equal(t, pbpeerstream.TypeURLPeeringTrustBundle, receiveRoots.GetResponse().ResourceURL) + + testutil.RunStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + heartbeatMsg := &pbpeerstream.ReplicationMessage{ + Payload: &pbpeerstream.ReplicationMessage_Heartbeat_{ + Heartbeat: &pbpeerstream.ReplicationMessage_Heartbeat{}}} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // errCh is used to collect any send errors from within the goroutine. + errCh := make(chan error) + + // Set up a goroutine to send the heartbeat every 1/2 of the timeout. + go func() { + // This is just a do while loop. We want to send the heartbeat right away to start + // because the test setup above takes some time and we might be close to the heartbeat + // timeout already. + for { + err := client.Send(heartbeatMsg) + if err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + return + } + select { + case <-time.After(incomingHeartbeatTimeout / 2): + case <-ctx.Done(): + close(errCh) + return + } + } + }() + + // Assert that the stream remains connected for 5 heartbeat timeouts. 
+ require.Never(t, func() bool { + status, ok := srv.StreamStatus(peerID) + if !ok { + return true + } + return !status.Connected + }, incomingHeartbeatTimeout*5, incomingHeartbeatTimeout) + + // Kill the heartbeat sending goroutine and check if it had any errors. + cancel() + err, ok := <-errCh + if ok { + require.NoError(t, err) + } +} + // makeClient sets up a *MockClient with the initial subscription // message handshake. func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID string) *MockClient { From b847f656a8cc066667701bbfdf41afdc4ad80bfd Mon Sep 17 00:00:00 2001 From: acpana <8968914+acpana@users.noreply.github.com> Date: Thu, 21 Jul 2022 10:51:05 -0700 Subject: [PATCH 059/107] Rename peering internal to ~ sync ENT to 5679392c81 Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/consul/state/catalog_oss_test.go | 80 +++++++++++++------------- agent/structs/structs.go | 2 +- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/agent/consul/state/catalog_oss_test.go b/agent/consul/state/catalog_oss_test.go index 2603e85b7..0fa912973 100644 --- a/agent/consul/state/catalog_oss_test.go +++ b/agent/consul/state/catalog_oss_test.go @@ -34,11 +34,11 @@ func testIndexerTableChecks() map[string]indexerTestCase { Node: "NoDe", CheckID: "CheckId", }, - expected: []byte("internal\x00node\x00checkid\x00"), + expected: []byte("~\x00node\x00checkid\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00node\x00checkid\x00"), + expected: []byte("~\x00node\x00checkid\x00"), }, prefix: []indexValue{ { @@ -47,7 +47,7 @@ func testIndexerTableChecks() map[string]indexerTestCase { }, { source: Query{Value: "nOdE"}, - expected: []byte("internal\x00node\x00"), + expected: []byte("~\x00node\x00"), }, }, extra: []indexerTestCase{ @@ -77,11 +77,11 @@ func testIndexerTableChecks() map[string]indexerTestCase { indexStatus: { read: indexValue{ source: Query{Value: "PASSING"}, - expected: []byte("internal\x00passing\x00"), + expected: []byte("~\x00passing\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00passing\x00"), + expected: []byte("~\x00passing\x00"), }, extra: []indexerTestCase{ { @@ -99,11 +99,11 @@ func testIndexerTableChecks() map[string]indexerTestCase { indexService: { read: indexValue{ source: Query{Value: "ServiceName"}, - expected: []byte("internal\x00servicename\x00"), + expected: []byte("~\x00servicename\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00servicename\x00"), + expected: []byte("~\x00servicename\x00"), }, extra: []indexerTestCase{ { @@ -124,11 +124,11 @@ func testIndexerTableChecks() map[string]indexerTestCase { Node: "NoDe", Service: "SeRvIcE", }, - expected: []byte("internal\x00node\x00service\x00"), + expected: []byte("~\x00node\x00service\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00node\x00service\x00"), + expected: []byte("~\x00node\x00service\x00"), }, extra: []indexerTestCase{ { @@ -152,11 +152,11 @@ func testIndexerTableChecks() map[string]indexerTestCase { source: Query{ Value: "NoDe", }, - expected: []byte("internal\x00node\x00"), + expected: []byte("~\x00node\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00node\x00"), + expected: []byte("~\x00node\x00"), }, extra: []indexerTestCase{ { @@ -272,11 +272,11 @@ func testIndexerTableNodes() map[string]indexerTestCase { indexID: { read: indexValue{ source: Query{Value: "NoDeId"}, - expected: []byte("internal\x00nodeid\x00"), + expected: 
[]byte("~\x00nodeid\x00"), }, write: indexValue{ source: &structs.Node{Node: "NoDeId"}, - expected: []byte("internal\x00nodeid\x00"), + expected: []byte("~\x00nodeid\x00"), }, prefix: []indexValue{ { @@ -289,11 +289,11 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, { source: Query{Value: "NoDeId"}, - expected: []byte("internal\x00nodeid\x00"), + expected: []byte("~\x00nodeid\x00"), }, { source: Query{}, - expected: []byte("internal\x00"), + expected: []byte("~\x00"), }, }, extra: []indexerTestCase{ @@ -322,27 +322,27 @@ func testIndexerTableNodes() map[string]indexerTestCase { indexUUID: { read: indexValue{ source: Query{Value: uuid}, - expected: append([]byte("internal\x00"), uuidBuf...), + expected: append([]byte("~\x00"), uuidBuf...), }, write: indexValue{ source: &structs.Node{ ID: types.NodeID(uuid), Node: "NoDeId", }, - expected: append([]byte("internal\x00"), uuidBuf...), + expected: append([]byte("~\x00"), uuidBuf...), }, prefix: []indexValue{ { // partial length source: Query{Value: uuid[:6]}, - expected: append([]byte("internal\x00"), uuidBuf[:3]...), + expected: append([]byte("~\x00"), uuidBuf[:3]...), }, { // full length source: Query{Value: uuid}, - expected: append([]byte("internal\x00"), uuidBuf...), + expected: append([]byte("~\x00"), uuidBuf...), }, { source: Query{}, - expected: []byte("internal\x00"), + expected: []byte("~\x00"), }, }, extra: []indexerTestCase{ @@ -382,7 +382,7 @@ func testIndexerTableNodes() map[string]indexerTestCase { Key: "KeY", Value: "VaLuE", }, - expected: []byte("internal\x00KeY\x00VaLuE\x00"), + expected: []byte("~\x00KeY\x00VaLuE\x00"), }, writeMulti: indexValueMulti{ source: &structs.Node{ @@ -393,8 +393,8 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, }, expected: [][]byte{ - []byte("internal\x00MaP-kEy-1\x00mAp-VaL-1\x00"), - []byte("internal\x00mAp-KeY-2\x00MaP-vAl-2\x00"), + []byte("~\x00MaP-kEy-1\x00mAp-VaL-1\x00"), + []byte("~\x00mAp-KeY-2\x00MaP-vAl-2\x00"), }, }, extra: []indexerTestCase{ @@ -449,11 +449,11 @@ func testIndexerTableServices() map[string]indexerTestCase { Node: "NoDeId", Service: "SeRvIcE", }, - expected: []byte("internal\x00nodeid\x00service\x00"), + expected: []byte("~\x00nodeid\x00service\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00nodeid\x00service\x00"), + expected: []byte("~\x00nodeid\x00service\x00"), }, prefix: []indexValue{ { @@ -466,11 +466,11 @@ func testIndexerTableServices() map[string]indexerTestCase { }, { source: Query{}, - expected: []byte("internal\x00"), + expected: []byte("~\x00"), }, { source: Query{Value: "NoDeId"}, - expected: []byte("internal\x00nodeid\x00"), + expected: []byte("~\x00nodeid\x00"), }, }, extra: []indexerTestCase{ @@ -505,11 +505,11 @@ func testIndexerTableServices() map[string]indexerTestCase { source: Query{ Value: "NoDeId", }, - expected: []byte("internal\x00nodeid\x00"), + expected: []byte("~\x00nodeid\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00nodeid\x00"), + expected: []byte("~\x00nodeid\x00"), }, extra: []indexerTestCase{ { @@ -530,11 +530,11 @@ func testIndexerTableServices() map[string]indexerTestCase { indexService: { read: indexValue{ source: Query{Value: "ServiceName"}, - expected: []byte("internal\x00servicename\x00"), + expected: []byte("~\x00servicename\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00servicename\x00"), + expected: []byte("~\x00servicename\x00"), }, extra: []indexerTestCase{ { @@ -552,14 +552,14 @@ func testIndexerTableServices() 
map[string]indexerTestCase { indexConnect: { read: indexValue{ source: Query{Value: "ConnectName"}, - expected: []byte("internal\x00connectname\x00"), + expected: []byte("~\x00connectname\x00"), }, write: indexValue{ source: &structs.ServiceNode{ ServiceName: "ConnectName", ServiceConnect: structs.ServiceConnect{Native: true}, }, - expected: []byte("internal\x00connectname\x00"), + expected: []byte("~\x00connectname\x00"), }, extra: []indexerTestCase{ { @@ -571,7 +571,7 @@ func testIndexerTableServices() map[string]indexerTestCase { DestinationServiceName: "ConnectName", }, }, - expected: []byte("internal\x00connectname\x00"), + expected: []byte("~\x00connectname\x00"), }, }, { @@ -621,13 +621,13 @@ func testIndexerTableServices() map[string]indexerTestCase { indexKind: { read: indexValue{ source: Query{Value: "connect-proxy"}, - expected: []byte("internal\x00connect-proxy\x00"), + expected: []byte("~\x00connect-proxy\x00"), }, write: indexValue{ source: &structs.ServiceNode{ ServiceKind: structs.ServiceKindConnectProxy, }, - expected: []byte("internal\x00connect-proxy\x00"), + expected: []byte("~\x00connect-proxy\x00"), }, extra: []indexerTestCase{ { @@ -636,7 +636,7 @@ func testIndexerTableServices() map[string]indexerTestCase { ServiceName: "ServiceName", ServiceKind: structs.ServiceKindTypical, }, - expected: []byte("internal\x00\x00"), + expected: []byte("~\x00\x00"), }, }, { @@ -694,18 +694,18 @@ func testIndexerTableServiceVirtualIPs() map[string]indexerTestCase { Name: "foo", }, }, - expected: []byte("internal\x00foo\x00"), + expected: []byte("~\x00foo\x00"), }, write: indexValue{ source: obj, - expected: []byte("internal\x00foo\x00"), + expected: []byte("~\x00foo\x00"), }, prefix: []indexValue{ { source: Query{ Value: "foo", }, - expected: []byte("internal\x00foo\x00"), + expected: []byte("~\x00foo\x00"), }, { source: Query{ diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 275bf4c18..afc913385 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -88,7 +88,7 @@ const ( const ( // LocalPeerKeyword is a reserved keyword used for indexing in the state store for objects in the local peer. - LocalPeerKeyword = "internal" + LocalPeerKeyword = "~" // DefaultPeerKeyword is the PeerName to use to refer to the local // cluster's own data, rather than replicated peered data. From cbafabde1641ab245f841498d5a81d9572990968 Mon Sep 17 00:00:00 2001 From: Nitya Dhanushkodi Date: Thu, 21 Jul 2022 14:56:11 -0700 Subject: [PATCH 060/107] update generate token endpoint to take external addresses (#13844) Update generate token endpoint (rpc, http, and api module) If ServerExternalAddresses are set, it will override any addresses gotten from the "consul" service, and be used in the token instead, and dialed by the dialer. This allows for setting up a load balancer for example, in front of the consul servers. 
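
For context on the change described above, here is a minimal sketch (not part of the patch) of how a cluster operator might use the new ServerExternalAddresses field through the Go api module; the peer name and load-balancer address below are illustrative assumptions, and error handling is abbreviated.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // ServerExternalAddresses overrides the server addresses discovered from
        // the "consul" service, so the generated token points dialers at an
        // externally reachable address (e.g. a load balancer) in addr:port form.
        req := api.PeeringGenerateTokenRequest{
            PeerName:                "peer-dc2",                      // hypothetical peer name
            ServerExternalAddresses: []string{"lb.example.com:8502"}, // hypothetical LB address
        }

        resp, _, err := client.Peerings().GenerateToken(context.Background(), req, nil)
        if err != nil {
            log.Fatal(err)
        }

        // The returned base64 token embeds the external address(es) and is what
        // the other cluster passes to its Establish call.
        fmt.Println(resp.PeeringToken)
    }

The same field is accepted in the JSON body of the /v1/peering/token HTTP endpoint, as exercised by the agent endpoint test in this patch.
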
--- agent/peering_endpoint_test.go | 33 ++++ agent/rpc/peering/service.go | 12 +- agent/rpc/peering/service_test.go | 34 +++++ api/go.mod | 23 ++- api/go.sum | 137 ++++++++++++++--- api/peering.go | 6 +- api/peering_test.go | 30 ++++ go.sum | 2 +- proto/pbpeering/peering.gen.go | 2 + proto/pbpeering/peering.pb.go | 246 ++++++++++++++++-------------- proto/pbpeering/peering.proto | 5 + 11 files changed, 381 insertions(+), 149 deletions(-) diff --git a/agent/peering_endpoint_test.go b/agent/peering_endpoint_test.go index 545e4f5ec..0b82e6399 100644 --- a/agent/peering_endpoint_test.go +++ b/agent/peering_endpoint_test.go @@ -113,6 +113,39 @@ func TestHTTP_Peering_GenerateToken(t *testing.T) { // The PeerID in the token is randomly generated so we don't assert on its value. require.NotEmpty(t, token.PeerID) }) + + t.Run("Success with external address", func(t *testing.T) { + externalAddress := "32.1.2.3" + body := &pbpeering.GenerateTokenRequest{ + PeerName: "peering-a", + ServerExternalAddresses: []string{externalAddress}, + } + + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + + req, err := http.NewRequest("POST", "/v1/peering/token", bytes.NewReader(bodyBytes)) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String()) + + var r pbpeering.GenerateTokenResponse + require.NoError(t, json.NewDecoder(resp.Body).Decode(&r)) + + tokenJSON, err := base64.StdEncoding.DecodeString(r.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + require.Nil(t, token.CA) + require.Equal(t, []string{externalAddress}, token.ServerAddresses) + require.Equal(t, "server.dc1.consul", token.ServerName) + + // The PeerID in the token is randomly generated so we don't assert on its value. + require.NotEmpty(t, token.PeerID) + }) } func TestHTTP_Peering_Establish(t *testing.T) { diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 4b7d051bc..1d0d219f6 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -176,9 +176,15 @@ func (s *Server) GenerateToken( return nil, err } - serverAddrs, err := s.Backend.GetServerAddresses() - if err != nil { - return nil, err + // ServerExternalAddresses must be formatted as addr:port. 
+ var serverAddrs []string + if len(req.ServerExternalAddresses) > 0 { + serverAddrs = req.ServerExternalAddresses + } else { + serverAddrs, err = s.Backend.GetServerAddresses() + if err != nil { + return nil, err + } } canRetry := true diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index e4ab2947a..939a304d2 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -108,6 +108,40 @@ func TestPeeringService_GenerateToken(t *testing.T) { require.Equal(t, expect, peers[0]) } +func TestPeeringService_GenerateTokenExternalAddress(t *testing.T) { + dir := testutil.TempDir(t, "consul") + signer, _, _ := tlsutil.GeneratePrivateKey() + ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + cafile := path.Join(dir, "cacert.pem") + require.NoError(t, ioutil.WriteFile(cafile, []byte(ca), 0600)) + + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(c *consul.Config) { + c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.1" + c.TLSConfig.GRPC.CAFile = cafile + c.DataDir = dir + }) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + externalAddress := "32.1.2.3:8502" + // happy path + req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Datacenter: "dc1", Meta: map[string]string{"foo": "bar"}, ServerExternalAddresses: []string{externalAddress}} + resp, err := client.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + token := &structs.PeeringToken{} + require.NoError(t, json.Unmarshal(tokenJSON, token)) + require.Equal(t, "server.dc1.consul", token.ServerName) + require.Len(t, token.ServerAddresses, 1) + require.Equal(t, externalAddress, token.ServerAddresses[0]) + require.Equal(t, []string{ca}, token.CA) +} + func TestPeeringService_Establish(t *testing.T) { validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") validTokenJSON, _ := json.Marshal(&validToken) diff --git a/api/go.mod b/api/go.mod index 5511d7d7e..f798021b5 100644 --- a/api/go.mod +++ b/api/go.mod @@ -5,13 +5,26 @@ go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk require ( + github.com/armon/go-metrics v0.3.10 // indirect + github.com/google/btree v1.0.0 // indirect github.com/google/go-cmp v0.5.7 github.com/hashicorp/consul/sdk v0.10.0 github.com/hashicorp/go-cleanhttp v0.5.1 - github.com/hashicorp/go-hclog v0.12.0 + github.com/hashicorp/go-hclog v0.14.1 + github.com/hashicorp/go-immutable-radix v1.3.0 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-uuid v1.0.1 - github.com/hashicorp/serf v0.9.6 - github.com/mitchellh/mapstructure v1.1.2 - github.com/stretchr/testify v1.4.0 + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/memberlist v0.3.1 // indirect + github.com/hashicorp/serf v0.9.7 + github.com/kr/pretty v0.2.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mitchellh/mapstructure v1.4.1 + github.com/pkg/errors v0.9.1 // indirect + github.com/stretchr/testify v1.7.0 + golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 
// indirect ) diff --git a/api/go.sum b/api/go.sum index ebf25c8c8..c47deef05 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,53 +1,97 @@ +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod 
h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -58,54 +102,95 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod 
h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod 
h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 
h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -116,11 +201,17 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/peering.go b/api/peering.go index b880380de..bd24d9849 100644 --- a/api/peering.go +++ b/api/peering.go @@ -83,8 +83,12 @@ type PeeringGenerateTokenRequest struct { Partition string `json:",omitempty"` Datacenter string `json:",omitempty"` Token string `json:",omitempty"` - // Meta is a mapping of some string value to any other string value + // Meta is a mapping of some string value to any other string value. Meta map[string]string `json:",omitempty"` + // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify + // load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server + // addresses obtained from the "consul" service. 
+ ServerExternalAddresses []string `json:",omitempty"` } type PeeringGenerateTokenResponse struct { diff --git a/api/peering_test.go b/api/peering_test.go index fcd7c5b3c..8a42a454b 100644 --- a/api/peering_test.go +++ b/api/peering_test.go @@ -2,6 +2,7 @@ package api import ( "context" + "encoding/base64" "reflect" "testing" "time" @@ -133,6 +134,35 @@ func TestAPI_Peering_GenerateToken(t *testing.T) { }) } +func TestAPI_Peering_GenerateToken_ExternalAddresses(t *testing.T) { + t.Parallel() + + c, s := makeClient(t) // this is "dc1" + defer s.Stop() + s.WaitForSerfCheck(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + externalAddress := "32.1.2.3:8502" + + // Generate a token happy path + p1 := PeeringGenerateTokenRequest{ + PeerName: "peer1", + Meta: map[string]string{"foo": "bar"}, + ServerExternalAddresses: []string{externalAddress}, + } + resp, wm, err := c.Peerings().GenerateToken(ctx, p1, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotNil(t, resp) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + require.Contains(t, string(tokenJSON), externalAddress) +} + // TODO(peering): cover the following test cases: bad/ malformed input, peering with wrong token, // peering with the wrong PeerName diff --git a/go.sum b/go.sum index 5e859cc7a..8f2afaa45 100644 --- a/go.sum +++ b/go.sum @@ -379,7 +379,7 @@ github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 h1:Ye8SofeDH github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= github.com/hashicorp/raft-boltdb/v2 v2.2.2 h1:rlkPtOllgIcKLxVT4nutqlTH2NRFn+tO1wwZk/4Dxqw= github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo= github.com/hashicorp/serf v0.9.8/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 h1:OKsyxKi2sNmqm1Gv93adf2AID2FOBFdCbbZn9fGtIdg= diff --git a/proto/pbpeering/peering.gen.go b/proto/pbpeering/peering.gen.go index 3f3b400b4..b7afc6a4c 100644 --- a/proto/pbpeering/peering.gen.go +++ b/proto/pbpeering/peering.gen.go @@ -45,6 +45,7 @@ func GenerateTokenRequestToAPI(s *GenerateTokenRequest, t *api.PeeringGenerateTo t.Datacenter = s.Datacenter t.Token = s.Token t.Meta = s.Meta + t.ServerExternalAddresses = s.ServerExternalAddresses } func GenerateTokenRequestFromAPI(t *api.PeeringGenerateTokenRequest, s *GenerateTokenRequest) { if s == nil { @@ -55,6 +56,7 @@ func GenerateTokenRequestFromAPI(t *api.PeeringGenerateTokenRequest, s *Generate s.Datacenter = t.Datacenter s.Token = t.Token s.Meta = t.Meta + s.ServerExternalAddresses = t.ServerExternalAddresses } func GenerateTokenResponseToAPI(s *GenerateTokenResponse, t *api.PeeringGenerateTokenResponse) { if s == nil { diff --git a/proto/pbpeering/peering.pb.go b/proto/pbpeering/peering.pb.go index 8fc14b83e..a5e0f2cb5 100644 --- a/proto/pbpeering/peering.pb.go +++ b/proto/pbpeering/peering.pb.go @@ -1362,6 +1362,10 @@ type GenerateTokenRequest struct { Token string `protobuf:"bytes,4,opt,name=Token,proto3" json:"Token,omitempty"` // Meta is a mapping of some string value to any other string value Meta 
map[string]string `protobuf:"bytes,5,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify + // load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server + // addresses obtained from the "consul" service. + ServerExternalAddresses []string `protobuf:"bytes,6,rep,name=ServerExternalAddresses,proto3" json:"ServerExternalAddresses,omitempty"` } func (x *GenerateTokenRequest) Reset() { @@ -1431,6 +1435,13 @@ func (x *GenerateTokenRequest) GetMeta() map[string]string { return nil } +func (x *GenerateTokenRequest) GetServerExternalAddresses() []string { + if x != nil { + return x.ServerExternalAddresses + } + return nil +} + // mog annotation: // // target=github.com/hashicorp/consul/api.PeeringGenerateTokenResponse @@ -1807,7 +1818,7 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x96, 0x02, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, @@ -1821,128 +1832,131 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x02, - 0x0a, 0x10, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, - 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 
0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, - 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x51, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, - 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x13, 0x0a, 0x11, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x73, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, - 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x53, 0x54, 0x41, 0x42, 0x4c, 0x49, 0x53, 0x48, - 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, - 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0c, - 0x0a, 0x08, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, - 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10, 0x06, 0x32, 0xc0, 0x08, 0x0a, - 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x82, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x09, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, - 0x68, 0x12, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2e, 
0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, - 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, - 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x35, 0x2e, 0x68, 0x61, + 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x02, 0x0a, 0x10, 0x45, 0x73, 0x74, 0x61, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, + 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x51, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, + 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x13, 0x0a, 0x11, + 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2a, 0x73, 0x0a, 0x0c, 0x50, 0x65, 
0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, + 0x0c, 0x45, 0x53, 0x54, 0x41, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, + 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x46, + 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4c, 0x45, + 0x54, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x06, 0x32, 0xc0, 0x08, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, + 0x0a, 0x09, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x33, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x34, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x50, 0x65, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 
0x6c, 0x2e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, - 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, - 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x36, 0x2e, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa3, - 0x01, 0x0a, 0x18, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x42, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, - 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x12, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, - 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 
0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x8a, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa3, 0x01, 0x0a, 0x18, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, - 0x65, 0x65, 0x72, 0x69, 0x6e, 
0x67, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, 0x21, - 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0xca, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, + 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, + 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, + 0x61, 0x64, 0x12, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x8a, 0x02, 0x0a, 0x25, 0x63, 0x6f, + 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x21, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 
0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xe2, + 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/pbpeering/peering.proto b/proto/pbpeering/peering.proto index 44df1df15..fc29d2458 100644 --- a/proto/pbpeering/peering.proto +++ b/proto/pbpeering/peering.proto @@ -272,6 +272,11 @@ message GenerateTokenRequest { // Meta is a mapping of some string value to any other string value map Meta = 5; + + // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify + // load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server + // addresses obtained from the "consul" service. + repeated string ServerExternalAddresses = 6; } // mog annotation: From e044343105d1f206524d43b7a215182ce0927826 Mon Sep 17 00:00:00 2001 From: Eric Haberkorn Date: Fri, 22 Jul 2022 09:14:43 -0400 Subject: [PATCH 061/107] Add Cluster Peering Failover Support to Prepared Queries (#13835) Add peering failover support to prepared queries --- agent/consul/prepared_query/template_test.go | 4 +- agent/consul/prepared_query/walk_test.go | 2 +- agent/consul/prepared_query_endpoint.go | 81 +++-- agent/consul/prepared_query_endpoint_test.go | 324 +++++++++++++++---- agent/dns_test.go | 2 +- agent/prepared_query_endpoint_test.go | 4 +- agent/structs/prepared_query.go | 35 +- api/prepared_query.go | 18 +- 8 files changed, 369 insertions(+), 101 deletions(-) diff --git a/agent/consul/prepared_query/template_test.go b/agent/consul/prepared_query/template_test.go index 05cbc17da..3fbf2d5af 100644 --- a/agent/consul/prepared_query/template_test.go +++ b/agent/consul/prepared_query/template_test.go @@ -22,7 +22,7 @@ var ( }, Service: structs.ServiceQuery{ Service: "${name.full}", - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ Datacenters: []string{ "${name.full}", "${name.prefix}", @@ -69,7 +69,7 @@ var ( }, Service: structs.ServiceQuery{ Service: "${name.full}", - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ Datacenters: []string{ "dc1", "dc2", diff --git a/agent/consul/prepared_query/walk_test.go b/agent/consul/prepared_query/walk_test.go index e45aa3a1e..ad71e0fed 100644 --- a/agent/consul/prepared_query/walk_test.go +++ b/agent/consul/prepared_query/walk_test.go @@ -20,7 +20,7 @@ func TestWalk_ServiceQuery(t *testing.T) { service := &structs.ServiceQuery{ Service: "the-service", - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ Datacenters: []string{"dc1", "dc2"}, }, Near: "_agent", diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index fc0642b6f..7215161f3 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -187,11 +187,16 @@ func parseService(svc *structs.ServiceQuery) error { 
return fmt.Errorf("Must provide a Service name to query") } + failover := svc.Failover // NearestN can be 0 which means "don't fail over by RTT". - if svc.Failover.NearestN < 0 { + if failover.NearestN < 0 { return fmt.Errorf("Bad NearestN '%d', must be >= 0", svc.Failover.NearestN) } + if (failover.NearestN != 0 || len(failover.Datacenters) != 0) && len(failover.Targets) != 0 { + return fmt.Errorf("Targets cannot be populated with NearestN or Datacenters") + } + // Make sure the metadata filters are valid if err := structs.ValidateNodeMetadata(svc.NodeMeta, true); err != nil { return err @@ -462,7 +467,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, // and bail out. Otherwise, we fail over and try remote DCs, as allowed // by the query setup. if len(reply.Nodes) == 0 { - wrapper := &queryServerWrapper{p.srv} + wrapper := &queryServerWrapper{srv: p.srv, executeRemote: p.ExecuteRemote} if err := queryFailover(wrapper, query, args, reply); err != nil { return err } @@ -565,8 +570,13 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery, reply.Nodes = nodes reply.DNS = query.DNS - // Stamp the result for this datacenter. - reply.Datacenter = p.srv.config.Datacenter + // Stamp the result with its this datacenter or peer. + if peerName := query.Service.PeerName; peerName != "" { + reply.PeerName = peerName + reply.Datacenter = "" + } else { + reply.Datacenter = p.srv.config.Datacenter + } return nil } @@ -651,12 +661,24 @@ func serviceMetaFilter(filters map[string]string, nodes structs.CheckServiceNode type queryServer interface { GetLogger() hclog.Logger GetOtherDatacentersByDistance() ([]string, error) - ForwardDC(method, dc string, args interface{}, reply interface{}) error + GetLocalDC() string + ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error } // queryServerWrapper applies the queryServer interface to a Server. type queryServerWrapper struct { - srv *Server + srv *Server + executeRemote func(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error +} + +// GetLocalDC returns the name of the local datacenter. +func (q *queryServerWrapper) GetLocalDC() string { + return q.srv.config.Datacenter +} + +// ExecuteRemote calls ExecuteRemote on PreparedQuery. +func (q *queryServerWrapper) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + return q.executeRemote(args, reply) } // GetLogger returns the server's logger. @@ -683,11 +705,6 @@ func (q *queryServerWrapper) GetOtherDatacentersByDistance() ([]string, error) { return result, nil } -// ForwardDC calls into the server's RPC forwarder. -func (q *queryServerWrapper) ForwardDC(method, dc string, args interface{}, reply interface{}) error { - return q.srv.forwardDC(method, dc, args, reply) -} - // queryFailover runs an algorithm to determine which DCs to try and then calls // them to try to locate alternative services. func queryFailover(q queryServer, query *structs.PreparedQuery, @@ -709,7 +726,7 @@ func queryFailover(q queryServer, query *structs.PreparedQuery, // Build a candidate list of DCs to try, starting with the nearest N // from RTTs. 
- var dcs []string + var targets []structs.QueryFailoverTarget index := make(map[string]struct{}) if query.Service.Failover.NearestN > 0 { for i, dc := range nearest { @@ -717,30 +734,36 @@ func queryFailover(q queryServer, query *structs.PreparedQuery, break } - dcs = append(dcs, dc) + targets = append(targets, structs.QueryFailoverTarget{Datacenter: dc}) index[dc] = struct{}{} } } // Then add any DCs explicitly listed that weren't selected above. - for _, dc := range query.Service.Failover.Datacenters { + for _, target := range query.Service.Failover.AsTargets() { // This will prevent a log of other log spammage if we do not // attempt to talk to datacenters we don't know about. - if _, ok := known[dc]; !ok { - q.GetLogger().Debug("Skipping unknown datacenter in prepared query", "datacenter", dc) - continue + if dc := target.Datacenter; dc != "" { + if _, ok := known[dc]; !ok { + q.GetLogger().Debug("Skipping unknown datacenter in prepared query", "datacenter", dc) + continue + } + + // This will make sure we don't re-try something that fails + // from the NearestN list. + if _, ok := index[dc]; !ok { + targets = append(targets, target) + } } - // This will make sure we don't re-try something that fails - // from the NearestN list. - if _, ok := index[dc]; !ok { - dcs = append(dcs, dc) + if target.PeerName != "" { + targets = append(targets, target) } } // Now try the selected DCs in priority order. failovers := 0 - for _, dc := range dcs { + for _, target := range targets { // This keeps track of how many iterations we actually run. failovers++ @@ -752,7 +775,15 @@ func queryFailover(q queryServer, query *structs.PreparedQuery, // through this slice across successive RPC calls. reply.Nodes = nil - // Note that we pass along the limit since it can be applied + // Reset PeerName because it may have been set by a previous failover + // target. + query.Service.PeerName = target.PeerName + dc := target.Datacenter + if target.PeerName != "" { + dc = q.GetLocalDC() + } + + // Note that we pass along the limit since may be applied // remotely to save bandwidth. We also pass along the consistency // mode information and token we were given, so that applies to // the remote query as well. 
@@ -763,9 +794,11 @@ func queryFailover(q queryServer, query *structs.PreparedQuery, QueryOptions: args.QueryOptions, Connect: args.Connect, } - if err := q.ForwardDC("PreparedQuery.ExecuteRemote", dc, remote, reply); err != nil { + + if err = q.ExecuteRemote(remote, reply); err != nil { q.GetLogger().Warn("Failed querying for service in datacenter", "service", query.Service.Service, + "peerName", query.Service.PeerName, "datacenter", dc, "error", err, ) diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index 30de90fb2..4965a2a0d 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -2,6 +2,9 @@ package consul import ( "bytes" + "context" + "encoding/base64" + "encoding/json" "fmt" "os" "reflect" @@ -14,6 +17,7 @@ import ( "github.com/hashicorp/serf/coordinate" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/consul-net-rpc/net/rpc" @@ -23,6 +27,7 @@ import ( "github.com/hashicorp/consul/agent/structs/aclfilter" tokenStore "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" @@ -82,8 +87,25 @@ func TestPreparedQuery_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - // Fix that and make sure it propagates an error from the Raft apply. + // Fix that and ensure Targets and NearestN cannot be set at the same time. + query.Query.Service.Failover.NearestN = 1 + query.Query.Service.Failover.Targets = []structs.QueryFailoverTarget{{PeerName: "peer"}} + err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply) + if err == nil || !strings.Contains(err.Error(), "Targets cannot be populated with") { + t.Fatalf("bad: %v", err) + } + + // Fix that and ensure Targets and Datacenters cannot be set at the same time. query.Query.Service.Failover.NearestN = 0 + query.Query.Service.Failover.Datacenters = []string{"dc2"} + query.Query.Service.Failover.Targets = []structs.QueryFailoverTarget{{PeerName: "peer"}} + err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply) + if err == nil || !strings.Contains(err.Error(), "Targets cannot be populated with") { + t.Fatalf("bad: %v", err) + } + + // Fix that and make sure it propagates an error from the Raft apply. + query.Query.Service.Failover.Targets = nil query.Query.Session = "nope" err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply) if err == nil || !strings.Contains(err.Error(), "invalid session") { @@ -1442,6 +1464,17 @@ func TestPreparedQuery_Execute(t *testing.T) { s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig) + dir3, s3 := testServerWithConfig(t, func(c *Config) { + c.Datacenter = "dc3" + c.PrimaryDatacenter = "dc3" + c.NodeName = "acceptingServer.dc3" + }) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + waitForLeaderEstablishment(t, s3) + codec3 := rpcClient(t, s3) + defer codec3.Close() + // Try to WAN join. 
joinWAN(t, s2, s1) retry.Run(t, func(r *retry.R) { @@ -1456,6 +1489,70 @@ func TestPreparedQuery_Execute(t *testing.T) { // check for RPC forwarding testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) testrpc.WaitForLeader(t, s1.RPC, "dc2", testrpc.WithToken("root")) + testrpc.WaitForLeader(t, s3.RPC, "dc3") + + acceptingPeerName := "my-peer-accepting-server" + dialingPeerName := "my-peer-dialing-server" + + // Set up peering between dc1 (dailing) and dc3 (accepting) and export the foo service + { + // Create a peering by generating a token. + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s3.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s3.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + req := pbpeering.GenerateTokenRequest{ + PeerName: dialingPeerName, + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + p := &pbpeering.Peering{ + ID: "cc56f0b8-3885-4e78-8d7b-614a0c45712d", + Name: acceptingPeerName, + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + require.NoError(t, s1.fsm.State().PeeringWrite(1000, p)) + + // Wait for the stream to be connected. + retry.Run(t, func(r *retry.R) { + status, found := s1.peerStreamServer.StreamStatus(p.ID) + require.True(r, found) + require.True(r, status.Connected) + }) + + exportedServices := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc3", + Entry: &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "foo", + Consumers: []structs.ServiceConsumer{{PeerName: dialingPeerName}}, + }, + }, + }, + } + var configOutput bool + require.NoError(t, msgpackrpc.CallWithCodec(codec3, "ConfigEntry.Apply", &exportedServices, &configOutput)) + require.True(t, configOutput) + } execNoNodesToken := createTokenWithPolicyName(t, codec1, "no-nodes", `service_prefix "foo" { policy = "read" }`, "root") rules := ` @@ -1485,9 +1582,16 @@ func TestPreparedQuery_Execute(t *testing.T) { // Set up some nodes in each DC that host the service. 
{ for i := 0; i < 10; i++ { - for _, dc := range []string{"dc1", "dc2"} { + for _, d := range []struct { + codec rpc.ClientCodec + dc string + }{ + {codec1, "dc1"}, + {codec2, "dc2"}, + {codec3, "dc3"}, + } { req := structs.RegisterRequest{ - Datacenter: dc, + Datacenter: d.dc, Node: fmt.Sprintf("node%d", i+1), Address: fmt.Sprintf("127.0.0.%d", i+1), NodeMeta: map[string]string{ @@ -1497,7 +1601,7 @@ func TestPreparedQuery_Execute(t *testing.T) { Service: &structs.NodeService{ Service: "foo", Port: 8000, - Tags: []string{dc, fmt.Sprintf("tag%d", i+1)}, + Tags: []string{d.dc, fmt.Sprintf("tag%d", i+1)}, Meta: map[string]string{ "svc-group": fmt.Sprintf("%d", i%2), "foo": "true", @@ -1510,15 +1614,8 @@ func TestPreparedQuery_Execute(t *testing.T) { req.Service.Meta["unique"] = "true" } - var codec rpc.ClientCodec - if dc == "dc1" { - codec = codec1 - } else { - codec = codec2 - } - var reply struct{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil { + if err := msgpackrpc.CallWithCodec(d.codec, "Catalog.Register", &req, &reply); err != nil { t.Fatalf("err: %v", err) } } @@ -1576,6 +1673,17 @@ func TestPreparedQuery_Execute(t *testing.T) { assert.True(t, reply.QueryMeta.KnownLeader) } + expectFailoverPeerNodes := func(t *testing.T, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) { + t.Helper() + assert.Len(t, reply.Nodes, n) + assert.Equal(t, "", reply.Datacenter) + assert.Equal(t, acceptingPeerName, reply.PeerName) + assert.Equal(t, 2, reply.Failovers) + assert.Equal(t, query.Query.Service.Service, reply.Service) + assert.Equal(t, query.Query.DNS, reply.DNS) + assert.True(t, reply.QueryMeta.KnownLeader) + } + t.Run("run the registered query", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", @@ -1962,10 +2070,10 @@ func TestPreparedQuery_Execute(t *testing.T) { require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Update the health of a node to mark it critical. - setHealth := func(t *testing.T, node string, health string) { + setHealth := func(t *testing.T, codec rpc.ClientCodec, dc string, node string, health string) { t.Helper() req := structs.RegisterRequest{ - Datacenter: "dc1", + Datacenter: dc, Node: node, Address: "127.0.0.1", Service: &structs.NodeService{ @@ -1981,9 +2089,9 @@ func TestPreparedQuery_Execute(t *testing.T) { WriteRequest: structs.WriteRequest{Token: "root"}, } var reply struct{} - require.NoError(t, msgpackrpc.CallWithCodec(codec1, "Catalog.Register", &req, &reply)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply)) } - setHealth(t, "node1", api.HealthCritical) + setHealth(t, codec1, "dc1", "node1", api.HealthCritical) // The failing node should be filtered. t.Run("failing node filtered", func(t *testing.T) { @@ -2003,7 +2111,7 @@ func TestPreparedQuery_Execute(t *testing.T) { }) // Upgrade it to a warning and re-query, should be 10 nodes again. - setHealth(t, "node1", api.HealthWarning) + setHealth(t, codec1, "dc1", "node1", api.HealthWarning) t.Run("warning nodes are included", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", @@ -2173,7 +2281,7 @@ func TestPreparedQuery_Execute(t *testing.T) { // Now fail everything in dc1 and we should get an empty list back. 
for i := 0; i < 10; i++ { - setHealth(t, fmt.Sprintf("node%d", i+1), api.HealthCritical) + setHealth(t, codec1, "dc1", fmt.Sprintf("node%d", i+1), api.HealthCritical) } t.Run("everything is failing so should get empty list", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ @@ -2308,6 +2416,61 @@ func TestPreparedQuery_Execute(t *testing.T) { assert.NotEqual(t, "node3", node.Node.Node) } }) + + // Modify the query to have it fail over to a bogus DC and then dc2. + query.Query.Service.Failover = structs.QueryFailoverOptions{ + Targets: []structs.QueryFailoverTarget{ + {Datacenter: "dc2"}, + {PeerName: acceptingPeerName}, + }, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) + + // Ensure the foo service has fully replicated. + retry.Run(t, func(r *retry.R) { + _, nodes, err := s1.fsm.State().CheckServiceNodes(nil, "foo", nil, acceptingPeerName) + require.NoError(r, err) + require.Len(r, nodes, 10) + }) + + // Now we should see 9 nodes from dc2 + t.Run("failing over to cluster peers", func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execToken}, + } + + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + + for _, node := range reply.Nodes { + assert.NotEqual(t, "node3", node.Node.Node) + } + expectFailoverNodes(t, &query, &reply, 9) + }) + + // Set all checks in dc2 as critical + for i := 0; i < 10; i++ { + setHealth(t, codec2, "dc2", fmt.Sprintf("node%d", i+1), api.HealthCritical) + } + + // Now we should see 9 nodes from dc3 (we have the tag filter still) + t.Run("failing over to cluster peers", func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execToken}, + } + + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + + for _, node := range reply.Nodes { + assert.NotEqual(t, "node3", node.Node.Node) + } + expectFailoverPeerNodes(t, &query, &reply, 9) + }) } func TestPreparedQuery_Execute_ForwardLeader(t *testing.T) { @@ -2724,7 +2887,9 @@ func TestPreparedQuery_Wrapper(t *testing.T) { joinWAN(t, s2, s1) // Try all the operations on a real server via the wrapper. 
- wrapper := &queryServerWrapper{s1} + wrapper := &queryServerWrapper{srv: s1, executeRemote: func(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + return nil + }} wrapper.GetLogger().Debug("Test") ret, err := wrapper.GetOtherDatacentersByDistance() @@ -2746,7 +2911,7 @@ type mockQueryServer struct { Datacenters []string DatacentersError error QueryLog []string - QueryFn func(dc string, args interface{}, reply interface{}) error + QueryFn func(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error Logger hclog.Logger LogBuffer *bytes.Buffer } @@ -2768,17 +2933,27 @@ func (m *mockQueryServer) GetLogger() hclog.Logger { return m.Logger } +func (m *mockQueryServer) GetLocalDC() string { + return "dc1" +} + func (m *mockQueryServer) GetOtherDatacentersByDistance() ([]string, error) { return m.Datacenters, m.DatacentersError } -func (m *mockQueryServer) ForwardDC(method, dc string, args interface{}, reply interface{}) error { - m.QueryLog = append(m.QueryLog, fmt.Sprintf("%s:%s", dc, method)) - if ret, ok := reply.(*structs.PreparedQueryExecuteResponse); ok { - ret.Datacenter = dc +func (m *mockQueryServer) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + peerName := args.Query.Service.PeerName + dc := args.Datacenter + if peerName != "" { + m.QueryLog = append(m.QueryLog, fmt.Sprintf("peer:%s", peerName)) + } else { + m.QueryLog = append(m.QueryLog, fmt.Sprintf("%s:%s", dc, "PreparedQuery.ExecuteRemote")) } + reply.PeerName = peerName + reply.Datacenter = dc + if m.QueryFn != nil { - return m.QueryFn(dc, args, reply) + return m.QueryFn(args, reply) } return nil } @@ -2788,7 +2963,7 @@ func TestPreparedQuery_queryFailover(t *testing.T) { query := &structs.PreparedQuery{ Name: "test", Service: structs.ServiceQuery{ - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ NearestN: 0, Datacenters: []string{""}, }, @@ -2862,10 +3037,9 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "dc1" { - ret.Nodes = nodes() + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "dc1" { + reply.Nodes = nodes() } return nil }, @@ -2890,10 +3064,9 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "dc3" { - ret.Nodes = nodes() + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "dc3" { + reply.Nodes = nodes() } return nil }, @@ -2926,7 +3099,7 @@ func TestPreparedQuery_queryFailover(t *testing.T) { } if len(reply.Nodes) != 0 || reply.Datacenter != "xxx" || reply.Failovers != 4 { - t.Fatalf("bad: %v", reply) + t.Fatalf("bad: %+v", reply) } if queries := mock.JoinQueryLog(); queries != "dc1:PreparedQuery.ExecuteRemote|dc2:PreparedQuery.ExecuteRemote|dc3:PreparedQuery.ExecuteRemote|xxx:PreparedQuery.ExecuteRemote" { t.Fatalf("bad: %s", queries) @@ -2940,10 +3113,9 @@ func TestPreparedQuery_queryFailover(t 
*testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "dc4" { - ret.Nodes = nodes() + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "dc4" { + reply.Nodes = nodes() } return nil }, @@ -2969,10 +3141,9 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "dc4" { - ret.Nodes = nodes() + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "dc4" { + reply.Nodes = nodes() } return nil }, @@ -2998,10 +3169,9 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "dc4" { - ret.Nodes = nodes() + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "dc4" { + reply.Nodes = nodes() } return nil }, @@ -3029,12 +3199,11 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "dc1" { + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "dc1" { return fmt.Errorf("XXX") - } else if dc == "dc4" { - ret.Nodes = nodes() + } else if req.Datacenter == "dc4" { + reply.Nodes = nodes() } return nil }, @@ -3063,10 +3232,9 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, _ interface{}, reply interface{}) error { - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "xxx" { - ret.Nodes = nodes() + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "xxx" { + reply.Nodes = nodes() } return nil }, @@ -3092,17 +3260,15 @@ func TestPreparedQuery_queryFailover(t *testing.T) { { mock := &mockQueryServer{ Datacenters: []string{"dc1", "dc2", "dc3", "xxx", "dc4"}, - QueryFn: func(dc string, args interface{}, reply interface{}) error { - inp := args.(*structs.PreparedQueryExecuteRemoteRequest) - ret := reply.(*structs.PreparedQueryExecuteResponse) - if dc == "xxx" { - if inp.Limit != 5 { - t.Fatalf("bad: %d", inp.Limit) + QueryFn: func(req *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if req.Datacenter == "xxx" { + if req.Limit != 5 { + t.Fatalf("bad: %d", req.Limit) } - if inp.RequireConsistent != true { - t.Fatalf("bad: %v", inp.RequireConsistent) + if req.RequireConsistent != true { + t.Fatalf("bad: %v", req.RequireConsistent) } - ret.Nodes = nodes() + reply.Nodes = nodes() } return nil }, @@ -3124,4 +3290,32 @@ func TestPreparedQuery_queryFailover(t *testing.T) { t.Fatalf("bad: %s", 
queries) } } + + // Failover returns data from the first cluster peer with data. + query.Service.Failover.Datacenters = nil + query.Service.Failover.Targets = []structs.QueryFailoverTarget{ + {PeerName: "cluster-01"}, + {Datacenter: "dc44"}, + {PeerName: "cluster-02"}, + } + { + mock := &mockQueryServer{ + Datacenters: []string{"dc44"}, + QueryFn: func(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { + if args.Query.Service.PeerName == "cluster-02" { + reply.Nodes = nodes() + } + return nil + }, + } + + var reply structs.PreparedQueryExecuteResponse + if err := queryFailover(mock, query, &structs.PreparedQueryExecuteRequest{}, &reply); err != nil { + t.Fatalf("err: %v", err) + } + require.Equal(t, "cluster-02", reply.PeerName) + require.Equal(t, 3, reply.Failovers) + require.Equal(t, nodes(), reply.Nodes) + require.Equal(t, "peer:cluster-01|dc44:PreparedQuery.ExecuteRemote|peer:cluster-02", mock.JoinQueryLog()) + } } diff --git a/agent/dns_test.go b/agent/dns_test.go index f0d82d2e7..51f2b6d54 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -6075,7 +6075,7 @@ func TestDNS_PreparedQuery_Failover(t *testing.T) { Name: "my-query", Service: structs.ServiceQuery{ Service: "db", - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ Datacenters: []string{"dc2"}, }, }, diff --git a/agent/prepared_query_endpoint_test.go b/agent/prepared_query_endpoint_test.go index 34b8975fd..9cf805b88 100644 --- a/agent/prepared_query_endpoint_test.go +++ b/agent/prepared_query_endpoint_test.go @@ -92,7 +92,7 @@ func TestPreparedQuery_Create(t *testing.T) { Session: "my-session", Service: structs.ServiceQuery{ Service: "my-service", - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ NearestN: 4, Datacenters: []string{"dc1", "dc2"}, }, @@ -883,7 +883,7 @@ func TestPreparedQuery_Update(t *testing.T) { Session: "my-session", Service: structs.ServiceQuery{ Service: "my-service", - Failover: structs.QueryDatacenterOptions{ + Failover: structs.QueryFailoverOptions{ NearestN: 4, Datacenters: []string{"dc1", "dc2"}, }, diff --git a/agent/structs/prepared_query.go b/agent/structs/prepared_query.go index 440053f0b..cd8ec574b 100644 --- a/agent/structs/prepared_query.go +++ b/agent/structs/prepared_query.go @@ -10,9 +10,9 @@ import ( "github.com/hashicorp/consul/types" ) -// QueryDatacenterOptions sets options about how we fail over if there are no +// QueryFailoverOptions sets options about how we fail over if there are no // healthy nodes in the local datacenter. -type QueryDatacenterOptions struct { +type QueryFailoverOptions struct { // NearestN is set to the number of remote datacenters to try, based on // network coordinates. NearestN int @@ -21,6 +21,32 @@ type QueryDatacenterOptions struct { // never try a datacenter multiple times, so those are subtracted from // this list before proceeding. Datacenters []string + + // Targets is a fixed list of datacenters and peers to try. This field cannot + // be populated with NearestN or Datacenters. + Targets []QueryFailoverTarget +} + +// AsTargets either returns Targets as is or Datacenters converted into +// Targets. 
+func (f *QueryFailoverOptions) AsTargets() []QueryFailoverTarget { + if dcs := f.Datacenters; len(dcs) > 0 { + var targets []QueryFailoverTarget + for _, dc := range dcs { + targets = append(targets, QueryFailoverTarget{Datacenter: dc}) + } + return targets + } + + return f.Targets +} + +type QueryFailoverTarget struct { + // PeerName specifies a peer to try during failover. + PeerName string + + // Datacenter specifies a datacenter to try during failover. + Datacenter string } // QueryDNSOptions controls settings when query results are served over DNS. @@ -37,7 +63,7 @@ type ServiceQuery struct { // Failover controls what we do if there are no healthy nodes in the // local datacenter. - Failover QueryDatacenterOptions + Failover QueryFailoverOptions // If OnlyPassing is true then we will only include nodes with passing // health checks (critical AND warning checks will cause a node to be @@ -323,6 +349,9 @@ type PreparedQueryExecuteResponse struct { // Datacenter is the datacenter that these results came from. Datacenter string + // PeerName specifies the cluster peer that these results came from. + PeerName string + // Failovers is a count of how many times we had to query a remote // datacenter. Failovers int diff --git a/api/prepared_query.go b/api/prepared_query.go index b3dd7be6f..60cd437cb 100644 --- a/api/prepared_query.go +++ b/api/prepared_query.go @@ -1,8 +1,8 @@ package api -// QueryDatacenterOptions sets options about how we fail over if there are no +// QueryFailoverOptions sets options about how we fail over if there are no // healthy nodes in the local datacenter. -type QueryDatacenterOptions struct { +type QueryFailoverOptions struct { // NearestN is set to the number of remote datacenters to try, based on // network coordinates. NearestN int @@ -11,6 +11,18 @@ type QueryDatacenterOptions struct { // never try a datacenter multiple times, so those are subtracted from // this list before proceeding. Datacenters []string + + // Targets is a fixed list of datacenters and peers to try. This field cannot + // be populated with NearestN or Datacenters. + Targets []QueryFailoverTarget +} + +type QueryFailoverTarget struct { + // PeerName specifies a peer to try during failover. + PeerName string + + // Datacenter specifies a datacenter to try during failover. + Datacenter string } // QueryDNSOptions controls settings when query results are served over DNS. @@ -35,7 +47,7 @@ type ServiceQuery struct { // Failover controls what we do if there are no healthy nodes in the // local datacenter. - Failover QueryDatacenterOptions + Failover QueryFailoverOptions // IgnoreCheckIDs is an optional list of health check IDs to ignore when // considering which nodes are healthy. It is useful as an emergency measure From f018bd6e090944e4fd6be986ace159a79e2c92d9 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Fri, 22 Jul 2022 11:52:05 +0100 Subject: [PATCH 062/107] proxycfg-glue: server-local implementation of `ExportedPeeredServices` This is the OSS portion of enterprise PR 2377. Adds a server-local implementation of the proxycfg.ExportedPeeredServices interface that sources data from a blocking query against the server's state store. 
--- agent/agent.go | 1 + .../proxycfg-glue/exported_peered_services.go | 60 ++++++++++ .../exported_peered_services_test.go | 112 ++++++++++++++++++ agent/proxycfg-glue/glue.go | 7 +- agent/proxycfg-glue/helpers_test.go | 8 ++ 5 files changed, 182 insertions(+), 6 deletions(-) create mode 100644 agent/proxycfg-glue/exported_peered_services.go create mode 100644 agent/proxycfg-glue/exported_peered_services_test.go diff --git a/agent/agent.go b/agent/agent.go index 751e62d4b..a7c89a727 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -4246,6 +4246,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources { sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps) sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps) sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache)) + sources.ExportedPeeredServices = proxycfgglue.ServerExportedPeeredServices(deps) sources.FederationStateListMeshGateways = proxycfgglue.ServerFederationStateListMeshGateways(deps) sources.GatewayServices = proxycfgglue.ServerGatewayServices(deps) sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth)) diff --git a/agent/proxycfg-glue/exported_peered_services.go b/agent/proxycfg-glue/exported_peered_services.go new file mode 100644 index 000000000..3ce8db632 --- /dev/null +++ b/agent/proxycfg-glue/exported_peered_services.go @@ -0,0 +1,60 @@ +package proxycfgglue + +import ( + "context" + + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/consul/watch" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/structs/aclfilter" +) + +// CacheExportedPeeredServices satisfies the proxycfg.ExportedPeeredServices +// interface by sourcing data from the agent cache. +func CacheExportedPeeredServices(c *cache.Cache) proxycfg.ExportedPeeredServices { + return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.ExportedPeeredServicesName} +} + +// ServerExportedPeeredServices satisifies the proxycfg.ExportedPeeredServices +// interface by sourcing data from a blocking query against the server's state +// store. 
+func ServerExportedPeeredServices(deps ServerDataSourceDeps) proxycfg.ExportedPeeredServices { + return &serverExportedPeeredServices{deps} +} + +type serverExportedPeeredServices struct { + deps ServerDataSourceDeps +} + +func (s *serverExportedPeeredServices) Notify(ctx context.Context, req *structs.DCSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, + func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedExportedServiceList, error) { + // TODO(peering): acls: mesh gateway needs appropriate wildcard service:read + authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil) + if err != nil { + return 0, nil, err + } + + index, serviceMap, err := store.ExportedServicesForAllPeersByName(ws, req.EnterpriseMeta) + if err != nil { + return 0, nil, err + } + + result := &structs.IndexedExportedServiceList{ + Services: serviceMap, + QueryMeta: structs.QueryMeta{ + Backend: structs.QueryBackendBlocking, + Index: index, + }, + } + aclfilter.New(authz, s.deps.Logger).Filter(result) + + return index, result, nil + }, + dispatchBlockingQueryUpdate[*structs.IndexedExportedServiceList](ch), + ) +} diff --git a/agent/proxycfg-glue/exported_peered_services_test.go b/agent/proxycfg-glue/exported_peered_services_test.go new file mode 100644 index 000000000..f0b41d9f3 --- /dev/null +++ b/agent/proxycfg-glue/exported_peered_services_test.go @@ -0,0 +1,112 @@ +package proxycfgglue + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +func TestServerExportedPeeredServices(t *testing.T) { + nextIndex := indexGenerator() + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := state.NewStateStore(nil) + + for _, peer := range []string{"peer-1", "peer-2", "peer-3"} { + require.NoError(t, store.PeeringWrite(nextIndex(), &pbpeering.Peering{ + ID: testUUID(t), + Name: peer, + State: pbpeering.PeeringState_ACTIVE, + })) + } + + require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "web", + Consumers: []structs.ServiceConsumer{ + {PeerName: "peer-1"}, + }, + }, + { + Name: "db", + Consumers: []structs.ServiceConsumer{ + {PeerName: "peer-2"}, + }, + }, + }, + })) + + authz := policyAuthorizer(t, ` + service "web" { policy = "read" } + service "api" { policy = "read" } + service "db" { policy = "deny" } + `) + + eventCh := make(chan proxycfg.UpdateEvent) + dataSource := ServerExportedPeeredServices(ServerDataSourceDeps{ + GetStore: func() Store { return store }, + ACLResolver: newStaticResolver(authz), + }) + require.NoError(t, dataSource.Notify(ctx, &structs.DCSpecificRequest{}, "", eventCh)) + + t.Run("initial state", func(t *testing.T) { + result := getEventResult[*structs.IndexedExportedServiceList](t, eventCh) + require.Equal(t, + map[string]structs.ServiceList{ + "peer-1": {structs.NewServiceName("web", nil)}, + }, + result.Services, + ) + }) + + t.Run("update exported services", func(t *testing.T) { + require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "web", + Consumers: []structs.ServiceConsumer{ 
+ {PeerName: "peer-1"}, + }, + }, + { + Name: "db", + Consumers: []structs.ServiceConsumer{ + {PeerName: "peer-2"}, + }, + }, + { + Name: "api", + Consumers: []structs.ServiceConsumer{ + {PeerName: "peer-1"}, + {PeerName: "peer-3"}, + }, + }, + }, + })) + + result := getEventResult[*structs.IndexedExportedServiceList](t, eventCh) + require.Equal(t, + map[string]structs.ServiceList{ + "peer-1": { + structs.NewServiceName("api", nil), + structs.NewServiceName("web", nil), + }, + "peer-3": { + structs.NewServiceName("api", nil), + }, + }, + result.Services, + ) + }) +} diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 1e254f406..04451c3d2 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -21,6 +21,7 @@ import ( type Store interface { watch.StateStore + ExportedServicesForAllPeersByName(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, map[string]structs.ServiceList, error) FederationStateList(ws memdb.WatchSet) (uint64, []*structs.FederationState, error) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error) IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) @@ -103,12 +104,6 @@ func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig { return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName} } -// CacheExportedPeeredServices satisfies the proxycfg.ExportedPeeredServices -// interface by sourcing data from the agent cache. -func CacheExportedPeeredServices(c *cache.Cache) proxycfg.ExportedPeeredServices { - return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.ExportedPeeredServicesName} -} - // cacheProxyDataSource implements a generic wrapper around the agent cache to // provide data to the proxycfg.Manager. 
type cacheProxyDataSource[ReqType cache.Request] struct { diff --git a/agent/proxycfg-glue/helpers_test.go b/agent/proxycfg-glue/helpers_test.go index 7e3b9078e..7a0c67df1 100644 --- a/agent/proxycfg-glue/helpers_test.go +++ b/agent/proxycfg-glue/helpers_test.go @@ -9,6 +9,14 @@ import ( "github.com/hashicorp/consul/agent/proxycfg" ) +func indexGenerator() func() uint64 { + var idx uint64 + return func() uint64 { + idx++ + return idx + } +} + func getEventResult[ResultType any](t *testing.T, eventCh <-chan proxycfg.UpdateEvent) ResultType { t.Helper() From b8131704ea6cc7b5bab3186facc695fb8d5b5b07 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Fri, 22 Jul 2022 19:28:13 +0200 Subject: [PATCH 063/107] Improve peered service empty downstreams message (#13854) --- .../consul-ui/app/components/topology-metrics/index.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ui/packages/consul-ui/app/components/topology-metrics/index.js b/ui/packages/consul-ui/app/components/topology-metrics/index.js index 43d39e9ec..627c2394b 100644 --- a/ui/packages/consul-ui/app/components/topology-metrics/index.js +++ b/ui/packages/consul-ui/app/components/topology-metrics/index.js @@ -5,6 +5,7 @@ import { inject as service } from '@ember/service'; export default class TopologyMetrics extends Component { @service('env') env; + @service() abilities; // =attributes @tracked centerDimensions; @@ -87,8 +88,12 @@ export default class TopologyMetrics extends Component { Namespace: '', }); } else if (downstreams.length === 0) { + const canUsePeers = this.abilities.can('use peers'); + items.push({ - Name: 'No downstreams, or the downstreams are imported services.', + Name: canUsePeers + ? 'No downstreams, or the downstreams are imported services.' + : 'No downstreams.', Datacenter: '', Namespace: '', }); From 9f9ac78243dc0137c2d17988d8892a59c0c3147c Mon Sep 17 00:00:00 2001 From: "A.J. Sanon" <47250909+sanon-dev@users.noreply.github.com> Date: Fri, 22 Jul 2022 13:30:25 -0400 Subject: [PATCH 064/107] Add ECS audit logging docs (#13729) --- website/content/docs/ecs/enterprise.mdx | 28 ++++++++++++- .../docs/ecs/manual/secure-configuration.mdx | 39 +++++++++++++++++++ .../ecs/terraform/secure-configuration.mdx | 5 ++- 3 files changed, 69 insertions(+), 3 deletions(-) diff --git a/website/content/docs/ecs/enterprise.mdx b/website/content/docs/ecs/enterprise.mdx index 29bd1a315..fb64e6332 100644 --- a/website/content/docs/ecs/enterprise.mdx +++ b/website/content/docs/ecs/enterprise.mdx @@ -56,7 +56,7 @@ If client support is required for any of the features, then you must use a Consu | Network Segments | No | Currently there is no capability to configure the network segment Consul clients on ECS run in. | | Namespaces | Yes | This feature requires Consul Enterprise servers. OSS clients can register into the `default` namespace. Registration into a non-default namespace requires a Consul Enterprise client. | | Admin Partitions | Yes | This feature requires Consul Enterprise servers. OSS clients can register into the `default` admin partition. Registration into a non-default partition requires a Consul Enterprise client. | -| Audit Logging | No* | Audit logging can be enabled on Consul servers that run outside of ECS but is not currently supported on the Consul clients that run inside ECS. | +| Audit Logging | Yes | This feature requires Consul Enterprise clients. 
| ### Admin Partitions and Namespaces @@ -121,3 +121,29 @@ module "my_task" { ``` + +### Audit Logging +Consul on ECS supports [audit logging](/docs/enterprise/audit-logging) when using Consul Enterprise clients. +This feature has the following requirements: + +- ACLs must be enabled. +- `mesh-task` must use a Consul Enterprise image. +- `gateway-task` must use a Consul Enterprise image. + +To enable audit logging, set `audit_logging = true` when configuring the client. + + + +```hcl +module "my_task" { + source = "hashicorp/consul-ecs/aws//modules/mesh-task" + family = "my_task" + + ... + + consul_image = "hashicorp/consul-enterprise:-ent" + audit_logging = true +} +``` + + diff --git a/website/content/docs/ecs/manual/secure-configuration.mdx b/website/content/docs/ecs/manual/secure-configuration.mdx index 1bb800c00..c888e5cba 100644 --- a/website/content/docs/ecs/manual/secure-configuration.mdx +++ b/website/content/docs/ecs/manual/secure-configuration.mdx @@ -472,6 +472,45 @@ The following table describes the additional fields that must be included in the | [`acl.tokens.agent`](/docs/agent/config/config-files#acl_tokens_agent) | string | Consul client token which authorizes this agent with Consul servers. | | [`partition`](/docs/agent/config/config-files#partition-1) | string | The Consul Enterprise admin partition this agent belongs to. | +### Configure Audit Logging +[Audit logging](/docs/enterprise/audit-logging) is supported on clients running Consul Enterprise with ACLs enabled. +To enable audit logging, update the startup script to add an `audit` stanza to the Consul client configuration file. + +The following example modifies the `consul-client` startup script to configure audit logs to be written to the `stdout` of the `consul-client` container. + + + +```shell +... + +# Write the Consul agent configuration file. +cat << EOF > /consul/agent-defaults.hcl +... + +partition = "" + +audit { + enabled = true + sink "stdout" { + type = "file" + format = "json" + path = "/dev/stdout" + delivery_guarantee = "best-effort" + } +} + +EOF +``` + + + +The following table describes the fields that must be included to configure audit logging. + +| Field name | Type | Description | +| ------------------------------------------------------------------------------- | ------- | ------------------------------------------------------------------------------------ | +| [`audit.enabled`](/docs/agent/config/config-files#enabled) | boolean | Enable audit logging for this agent. | +| [`audit.sink`](/docs/agent/config/config-files#sink) | object | The audit logging sink for this agent. | + ## Configure `consul-ecs-mesh-init` and `consul-ecs-health-sync` The following *additional* options should be set in the [`CONSUL_ECS_CONFIG_JSON`](/docs/ecs/manual/install#consul_ecs_config_json) environment variable. When these options are specified, the `consul-ecs mesh-init` command will run the `consul login` command to obtain a service token from the Consul AWS IAM Auth method. The `consul-ecs health-sync` command is responsible for running a `consul logout` command for both the service and client tokens when the task shuts down. 
diff --git a/website/content/docs/ecs/terraform/secure-configuration.mdx b/website/content/docs/ecs/terraform/secure-configuration.mdx index 81e508d44..ef945902f 100644 --- a/website/content/docs/ecs/terraform/secure-configuration.mdx +++ b/website/content/docs/ecs/terraform/secure-configuration.mdx @@ -136,8 +136,7 @@ resource "aws_secretsmanager_secret_version" "gossip_key" { ### Enable secure deployment -Add the following configurations to enable secure deployment. The `acl_secret_name_prefix` -should be the same as the `name_prefix` you provide to the ACL controller module. +To enable secure deployment, add the following configuration to the task module. ```hcl module "my_task" { @@ -153,6 +152,7 @@ module "my_task" { acls = true consul_http_addr = "https://consul-server.example.com:8501" consul_https_ca_cert_arn = aws_secretsmanager_secret.ca_cert.arn + audit_logging = true } ``` @@ -166,6 +166,7 @@ The following table explains the `mesh-task` input variables relevant to a secur | `acls` | boolean | If true, ACLs are enabled. | | `consul_http_addr` | string | The Consul server address. Required when `acls = true` in order to log in to Consul's AWS IAM auth method to obtain ACL tokens. | | `consul_https_ca_cert_arn` | string | (optional) The Secrets Manager secret containing the CA cert for HTTPS communication with Consul servers. Required if the server's certificate is self-signed or signed by an internal CA. This is not required for Consul servers in HCP. | +| `audit_logging` | boolean | (optional) If true, ACL audit logging is enabled. Consul client is configured to print audit logs to `stdout`. | Complete the following steps described in the Installation with Terraform chapter to deploy and connect your services: From 55b7eb6838d200bc0e162e7a1ddd69b48297d249 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 22 Jul 2022 10:33:50 -0700 Subject: [PATCH 065/107] Add changelog note --- .changelog/13847.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/13847.txt diff --git a/.changelog/13847.txt b/.changelog/13847.txt new file mode 100644 index 000000000..2bbe7e241 --- /dev/null +++ b/.changelog/13847.txt @@ -0,0 +1,3 @@ +```release-note:bug +connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. +``` From a253d7e49b294a2fcb4fb165e4e2b3b992a3f11f Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 22 Jul 2022 14:59:34 -0400 Subject: [PATCH 066/107] Rename some protobuf package names to be fqdn like (#13861) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are used in various bits of the wire format (for gRPC) and internally with Go’s registry so we want to namespace things properly. 
--- proto-public/pbacl/acl.pb.go | 131 +++++---- proto-public/pbacl/acl.proto | 2 +- proto-public/pbacl/acl_grpc.pb.go | 10 +- proto-public/pbconnectca/ca.pb.go | 146 +++++----- proto-public/pbconnectca/ca.proto | 2 +- proto-public/pbconnectca/ca_grpc.pb.go | 8 +- proto-public/pbdataplane/dataplane.pb.go | 252 +++++++++--------- proto-public/pbdataplane/dataplane.proto | 2 +- proto-public/pbdataplane/dataplane_grpc.pb.go | 10 +- .../pbserverdiscovery/serverdiscovery.pb.go | 91 ++++--- .../pbserverdiscovery/serverdiscovery.proto | 2 +- .../serverdiscovery_grpc.pb.go | 4 +- 12 files changed, 354 insertions(+), 306 deletions(-) diff --git a/proto-public/pbacl/acl.pb.go b/proto-public/pbacl/acl.pb.go index 24fbd31a0..1a89a05c1 100644 --- a/proto-public/pbacl/acl.pb.go +++ b/proto-public/pbacl/acl.pb.go @@ -322,53 +322,66 @@ var File_proto_public_pbacl_acl_proto protoreflect.FileDescriptor var file_proto_public_pbacl_acl_proto_rawDesc = []byte{ 0x0a, 0x1c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, - 0x62, 0x61, 0x63, 0x6c, 0x2f, 0x61, 0x63, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, - 0x61, 0x63, 0x6c, 0x22, 0x10, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x02, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x75, 0x74, - 0x68, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, - 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, - 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2f, 0x0a, 0x04, 0x6d, 0x65, - 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, - 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, - 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, - 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, - 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x36, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x25, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4a, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, - 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x64, - 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x32, 0x73, 0x0a, 0x0a, 0x41, - 0x43, 0x4c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x12, 0x11, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x69, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, - 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x12, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, - 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x61, 0x63, 0x6c, 0x2e, - 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x42, 0x6f, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x63, 0x6c, 0x42, 0x08, 0x41, 0x63, 0x6c, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x2f, 0x70, 0x62, 0x61, 0x63, 0x6c, 0xa2, 0x02, 0x03, 0x41, 0x58, 0x58, 0xaa, 0x02, 0x03, - 0x41, 0x63, 0x6c, 0xca, 0x02, 0x03, 0x41, 0x63, 0x6c, 0xe2, 0x02, 0x0f, 0x41, 0x63, 0x6c, 0x5c, - 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x03, 0x41, 0x63, + 0x62, 0x61, 0x63, 0x6c, 0x2f, 0x61, 0x63, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x61, 0x63, 0x6c, 0x22, 0x10, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x02, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x75, + 0x74, 0x68, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, + 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x40, 0x0a, 0x04, 0x6d, + 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x61, 0x63, 0x6c, + 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 
0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x47, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4a, 0x0a, 0x0a, 0x4c, + 0x6f, 0x67, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x6f, 0x75, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, + 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x32, 0xb7, + 0x01, 0x0a, 0x0a, 0x41, 0x43, 0x4c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, + 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x22, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x61, 0x63, + 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x55, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x23, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x61, + 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x61, 0x63, 0x6c, 0x42, 0x08, 0x41, 0x63, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 
0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x61, 0x63, + 0x6c, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x41, 0xaa, 0x02, 0x14, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x41, 0x63, 0x6c, 0xca, 0x02, + 0x14, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x5c, 0x41, 0x63, 0x6c, 0xe2, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x41, 0x63, 0x6c, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x16, 0x48, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x41, 0x63, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } @@ -386,20 +399,20 @@ func file_proto_public_pbacl_acl_proto_rawDescGZIP() []byte { var file_proto_public_pbacl_acl_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_proto_public_pbacl_acl_proto_goTypes = []interface{}{ - (*LogoutResponse)(nil), // 0: acl.LogoutResponse - (*LoginRequest)(nil), // 1: acl.LoginRequest - (*LoginResponse)(nil), // 2: acl.LoginResponse - (*LoginToken)(nil), // 3: acl.LoginToken - (*LogoutRequest)(nil), // 4: acl.LogoutRequest - nil, // 5: acl.LoginRequest.MetaEntry + (*LogoutResponse)(nil), // 0: hashicorp.consul.acl.LogoutResponse + (*LoginRequest)(nil), // 1: hashicorp.consul.acl.LoginRequest + (*LoginResponse)(nil), // 2: hashicorp.consul.acl.LoginResponse + (*LoginToken)(nil), // 3: hashicorp.consul.acl.LoginToken + (*LogoutRequest)(nil), // 4: hashicorp.consul.acl.LogoutRequest + nil, // 5: hashicorp.consul.acl.LoginRequest.MetaEntry } var file_proto_public_pbacl_acl_proto_depIdxs = []int32{ - 5, // 0: acl.LoginRequest.meta:type_name -> acl.LoginRequest.MetaEntry - 3, // 1: acl.LoginResponse.token:type_name -> acl.LoginToken - 1, // 2: acl.ACLService.Login:input_type -> acl.LoginRequest - 4, // 3: acl.ACLService.Logout:input_type -> acl.LogoutRequest - 2, // 4: acl.ACLService.Login:output_type -> acl.LoginResponse - 0, // 5: acl.ACLService.Logout:output_type -> acl.LogoutResponse + 5, // 0: hashicorp.consul.acl.LoginRequest.meta:type_name -> hashicorp.consul.acl.LoginRequest.MetaEntry + 3, // 1: hashicorp.consul.acl.LoginResponse.token:type_name -> hashicorp.consul.acl.LoginToken + 1, // 2: hashicorp.consul.acl.ACLService.Login:input_type -> hashicorp.consul.acl.LoginRequest + 4, // 3: hashicorp.consul.acl.ACLService.Logout:input_type -> hashicorp.consul.acl.LogoutRequest + 2, // 4: hashicorp.consul.acl.ACLService.Login:output_type -> hashicorp.consul.acl.LoginResponse + 0, // 5: hashicorp.consul.acl.ACLService.Logout:output_type -> hashicorp.consul.acl.LogoutResponse 4, // [4:6] is the sub-list for method output_type 2, // [2:4] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name diff --git a/proto-public/pbacl/acl.proto b/proto-public/pbacl/acl.proto index aa315c58d..221742f67 100644 --- a/proto-public/pbacl/acl.proto +++ b/proto-public/pbacl/acl.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package acl; +package hashicorp.consul.acl; service ACLService { // Login exchanges the presented bearer token for a Consul ACL token using a diff --git a/proto-public/pbacl/acl_grpc.pb.go b/proto-public/pbacl/acl_grpc.pb.go index ff588154c..87155b8d0 100644 --- a/proto-public/pbacl/acl_grpc.pb.go +++ 
b/proto-public/pbacl/acl_grpc.pb.go @@ -39,7 +39,7 @@ func NewACLServiceClient(cc grpc.ClientConnInterface) ACLServiceClient { func (c *aCLServiceClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) { out := new(LoginResponse) - err := c.cc.Invoke(ctx, "/acl.ACLService/Login", in, out, opts...) + err := c.cc.Invoke(ctx, "/hashicorp.consul.acl.ACLService/Login", in, out, opts...) if err != nil { return nil, err } @@ -48,7 +48,7 @@ func (c *aCLServiceClient) Login(ctx context.Context, in *LoginRequest, opts ... func (c *aCLServiceClient) Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) { out := new(LogoutResponse) - err := c.cc.Invoke(ctx, "/acl.ACLService/Logout", in, out, opts...) + err := c.cc.Invoke(ctx, "/hashicorp.consul.acl.ACLService/Logout", in, out, opts...) if err != nil { return nil, err } @@ -98,7 +98,7 @@ func _ACLService_Login_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/acl.ACLService/Login", + FullMethod: "/hashicorp.consul.acl.ACLService/Login", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ACLServiceServer).Login(ctx, req.(*LoginRequest)) @@ -116,7 +116,7 @@ func _ACLService_Logout_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/acl.ACLService/Logout", + FullMethod: "/hashicorp.consul.acl.ACLService/Logout", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ACLServiceServer).Logout(ctx, req.(*LogoutRequest)) @@ -128,7 +128,7 @@ func _ACLService_Logout_Handler(srv interface{}, ctx context.Context, dec func(i // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var ACLService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "acl.ACLService", + ServiceName: "hashicorp.consul.acl.ACLService", HandlerType: (*ACLServiceServer)(nil), Methods: []grpc.MethodDesc{ { diff --git a/proto-public/pbconnectca/ca.pb.go b/proto-public/pbconnectca/ca.pb.go index b639d4a96..e690ae1c2 100644 --- a/proto-public/pbconnectca/ca.pb.go +++ b/proto-public/pbconnectca/ca.pb.go @@ -361,62 +361,74 @@ var File_proto_public_pbconnectca_ca_proto protoreflect.FileDescriptor var file_proto_public_pbconnectca_ca_proto_rawDesc = []byte{ 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2f, 0x63, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x13, 0x0a, 0x11, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x01, 0x0a, 0x12, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, - 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x49, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, - 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x22, 0x9d, 0x02, - 0x0a, 0x06, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, - 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x65, - 0x72, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x72, - 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0c, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x1f, 0x0a, - 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x63, 0x73, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x73, 0x72, 0x22, 0x29, - 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, - 0x0a, 0x08, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x63, 0x65, 0x72, 0x74, 0x50, 0x65, 0x6d, 0x32, 0x9c, 0x01, 0x0a, 0x10, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4d, - 0x0a, 0x0a, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, - 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x39, 0x0a, - 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, - 0x61, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 
0x42, 0x92, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x42, 0x07, 0x43, 0x61, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, - 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0xa2, 0x02, 0x03, 0x43, 0x58, - 0x58, 0xaa, 0x02, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0xca, 0x02, 0x09, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0xe2, 0x02, 0x15, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x63, 0x61, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0xea, 0x02, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x13, 0x0a, 0x11, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x97, 0x01, 0x0a, 0x12, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x6f, 0x6f, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x38, 0x0a, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, + 0x61, 0x2e, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x22, + 0x9d, 0x02, 0x0a, 0x06, 0x43, 0x41, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x69, 0x67, + 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, + 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, + 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x11, 0x69, 0x6e, 0x74, 
0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, + 0x0e, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x61, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0c, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x41, 0x74, 0x22, + 0x1f, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x73, 0x72, + 0x22, 0x29, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x63, 0x65, 0x72, 0x74, 0x50, 0x65, 0x6d, 0x32, 0xe0, 0x01, 0x0a, 0x10, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x6f, 0x0a, 0x0a, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x2d, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x6f, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x5b, 0x0a, 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x27, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x2e, + 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xe9, + 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, + 0x61, 0x42, 0x07, 0x43, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x63, 0x61, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x63, 0x61, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x63, 0x61, 0xe2, 0x02, 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 
0x6f, 0x72, 0x70, 0x5c, 0x43, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, + 0x3a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -433,20 +445,20 @@ func file_proto_public_pbconnectca_ca_proto_rawDescGZIP() []byte { var file_proto_public_pbconnectca_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_proto_public_pbconnectca_ca_proto_goTypes = []interface{}{ - (*WatchRootsRequest)(nil), // 0: connectca.WatchRootsRequest - (*WatchRootsResponse)(nil), // 1: connectca.WatchRootsResponse - (*CARoot)(nil), // 2: connectca.CARoot - (*SignRequest)(nil), // 3: connectca.SignRequest - (*SignResponse)(nil), // 4: connectca.SignResponse + (*WatchRootsRequest)(nil), // 0: hashicorp.consul.connectca.WatchRootsRequest + (*WatchRootsResponse)(nil), // 1: hashicorp.consul.connectca.WatchRootsResponse + (*CARoot)(nil), // 2: hashicorp.consul.connectca.CARoot + (*SignRequest)(nil), // 3: hashicorp.consul.connectca.SignRequest + (*SignResponse)(nil), // 4: hashicorp.consul.connectca.SignResponse (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp } var file_proto_public_pbconnectca_ca_proto_depIdxs = []int32{ - 2, // 0: connectca.WatchRootsResponse.roots:type_name -> connectca.CARoot - 5, // 1: connectca.CARoot.rotated_out_at:type_name -> google.protobuf.Timestamp - 0, // 2: connectca.ConnectCAService.WatchRoots:input_type -> connectca.WatchRootsRequest - 3, // 3: connectca.ConnectCAService.Sign:input_type -> connectca.SignRequest - 1, // 4: connectca.ConnectCAService.WatchRoots:output_type -> connectca.WatchRootsResponse - 4, // 5: connectca.ConnectCAService.Sign:output_type -> connectca.SignResponse + 2, // 0: hashicorp.consul.connectca.WatchRootsResponse.roots:type_name -> hashicorp.consul.connectca.CARoot + 5, // 1: hashicorp.consul.connectca.CARoot.rotated_out_at:type_name -> google.protobuf.Timestamp + 0, // 2: hashicorp.consul.connectca.ConnectCAService.WatchRoots:input_type -> hashicorp.consul.connectca.WatchRootsRequest + 3, // 3: hashicorp.consul.connectca.ConnectCAService.Sign:input_type -> hashicorp.consul.connectca.SignRequest + 1, // 4: hashicorp.consul.connectca.ConnectCAService.WatchRoots:output_type -> hashicorp.consul.connectca.WatchRootsResponse + 4, // 5: hashicorp.consul.connectca.ConnectCAService.Sign:output_type -> hashicorp.consul.connectca.SignResponse 4, // [4:6] is the sub-list for method output_type 2, // [2:4] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name diff --git a/proto-public/pbconnectca/ca.proto b/proto-public/pbconnectca/ca.proto index f956a80c5..9aba55198 100644 --- a/proto-public/pbconnectca/ca.proto +++ b/proto-public/pbconnectca/ca.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package connectca; +package hashicorp.consul.connectca; import "google/protobuf/timestamp.proto"; diff --git a/proto-public/pbconnectca/ca_grpc.pb.go b/proto-public/pbconnectca/ca_grpc.pb.go index b23cf32f2..58669becb 100644 --- a/proto-public/pbconnectca/ca_grpc.pb.go +++ b/proto-public/pbconnectca/ca_grpc.pb.go @@ -40,7 +40,7 @@ func NewConnectCAServiceClient(cc grpc.ClientConnInterface) ConnectCAServiceClie } func (c *connectCAServiceClient) WatchRoots(ctx context.Context, in *WatchRootsRequest, opts ...grpc.CallOption) 
(ConnectCAService_WatchRootsClient, error) { - stream, err := c.cc.NewStream(ctx, &ConnectCAService_ServiceDesc.Streams[0], "/connectca.ConnectCAService/WatchRoots", opts...) + stream, err := c.cc.NewStream(ctx, &ConnectCAService_ServiceDesc.Streams[0], "/hashicorp.consul.connectca.ConnectCAService/WatchRoots", opts...) if err != nil { return nil, err } @@ -73,7 +73,7 @@ func (x *connectCAServiceWatchRootsClient) Recv() (*WatchRootsResponse, error) { func (c *connectCAServiceClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) { out := new(SignResponse) - err := c.cc.Invoke(ctx, "/connectca.ConnectCAService/Sign", in, out, opts...) + err := c.cc.Invoke(ctx, "/hashicorp.consul.connectca.ConnectCAService/Sign", in, out, opts...) if err != nil { return nil, err } @@ -146,7 +146,7 @@ func _ConnectCAService_Sign_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/connectca.ConnectCAService/Sign", + FullMethod: "/hashicorp.consul.connectca.ConnectCAService/Sign", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConnectCAServiceServer).Sign(ctx, req.(*SignRequest)) @@ -158,7 +158,7 @@ func _ConnectCAService_Sign_Handler(srv interface{}, ctx context.Context, dec fu // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var ConnectCAService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "connectca.ConnectCAService", + ServiceName: "hashicorp.consul.connectca.ConnectCAService", HandlerType: (*ConnectCAServiceServer)(nil), Methods: []grpc.MethodDesc{ { diff --git a/proto-public/pbdataplane/dataplane.pb.go b/proto-public/pbdataplane/dataplane.pb.go index 4f9346b3f..1da1eea15 100644 --- a/proto-public/pbdataplane/dataplane.pb.go +++ b/proto-public/pbdataplane/dataplane.pb.go @@ -193,7 +193,7 @@ type DataplaneFeatureSupport struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - FeatureName DataplaneFeatures `protobuf:"varint,1,opt,name=feature_name,json=featureName,proto3,enum=dataplane.DataplaneFeatures" json:"feature_name,omitempty"` + FeatureName DataplaneFeatures `protobuf:"varint,1,opt,name=feature_name,json=featureName,proto3,enum=hashicorp.consul.dataplane.DataplaneFeatures" json:"feature_name,omitempty"` Supported bool `protobuf:"varint,2,opt,name=supported,proto3" json:"supported,omitempty"` } @@ -400,7 +400,7 @@ type GetEnvoyBootstrapParamsResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ServiceKind ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=dataplane.ServiceKind" json:"service_kind,omitempty"` + ServiceKind ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=hashicorp.consul.dataplane.ServiceKind" json:"service_kind,omitempty"` // The destination service name Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` @@ -488,110 +488,124 @@ var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ 0x0a, 0x28, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x12, 0x09, 0x64, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, 0x17, 0x44, - 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x0b, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x64, 0x0a, 0x1c, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, - 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x6f, 0x64, - 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x0b, 0x0a, - 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x83, 0x02, 0x0a, 0x1f, 0x47, - 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, - 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, - 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2a, 0xc7, 0x01, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, - 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x41, - 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, - 0x5f, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x53, 0x10, 0x01, - 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, - 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, 0x45, 0x52, 0x54, - 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, - 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x4e, 0x56, 0x4f, 0x59, - 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, - 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, 0xcc, 0x01, 0x0a, 0x0b, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, - 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x49, 0x43, 0x41, 0x4c, - 0x10, 
0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, - 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, - 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, - 0x4e, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, - 0x03, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, - 0x44, 0x5f, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x41, - 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x45, 0x52, 0x56, 0x49, - 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, - 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x05, 0x32, 0x8d, 0x02, 0x0a, 0x10, 0x44, 0x61, - 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x84, - 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, - 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x2f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x89, 0x01, 0x0a, + 0x17, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x50, 0x0a, 0x0c, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x0b, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x22, 0x9e, 0x01, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x30, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, - 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, - 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, - 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x99, 0x01, 0x0a, 0x0d, 0x63, 0x6f, - 0x6d, 0x2e, 0x64, 0x61, 0x74, 
0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, 0x74, - 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0xa2, 0x02, 0x03, 0x44, 0x58, 0x58, 0xaa, 0x02, 0x09, 0x44, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xca, 0x02, 0x09, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0xe2, 0x02, 0x15, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5c, 0x47, 0x50, - 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09, 0x44, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1c, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x1a, 0x73, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x47, 0x65, + 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x07, + 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x6e, 0x6f, + 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x94, + 0x02, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6b, 0x69, + 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x4b, 0x69, 0x6e, + 0x64, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0xc7, 0x01, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, + 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, + 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, + 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, + 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, + 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, + 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, + 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, + 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, + 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, + 0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, + 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, + 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, + 0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, + 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, + 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, + 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, + 0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, + 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, + 
0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x05, 0x32, 0xd2, + 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02, + 0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02, + 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x3a, 
0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74, + 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -609,24 +623,24 @@ func file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP() []byte { var file_proto_public_pbdataplane_dataplane_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_proto_public_pbdataplane_dataplane_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_proto_public_pbdataplane_dataplane_proto_goTypes = []interface{}{ - (DataplaneFeatures)(0), // 0: dataplane.DataplaneFeatures - (ServiceKind)(0), // 1: dataplane.ServiceKind - (*GetSupportedDataplaneFeaturesRequest)(nil), // 2: dataplane.GetSupportedDataplaneFeaturesRequest - (*DataplaneFeatureSupport)(nil), // 3: dataplane.DataplaneFeatureSupport - (*GetSupportedDataplaneFeaturesResponse)(nil), // 4: dataplane.GetSupportedDataplaneFeaturesResponse - (*GetEnvoyBootstrapParamsRequest)(nil), // 5: dataplane.GetEnvoyBootstrapParamsRequest - (*GetEnvoyBootstrapParamsResponse)(nil), // 6: dataplane.GetEnvoyBootstrapParamsResponse + (DataplaneFeatures)(0), // 0: hashicorp.consul.dataplane.DataplaneFeatures + (ServiceKind)(0), // 1: hashicorp.consul.dataplane.ServiceKind + (*GetSupportedDataplaneFeaturesRequest)(nil), // 2: hashicorp.consul.dataplane.GetSupportedDataplaneFeaturesRequest + (*DataplaneFeatureSupport)(nil), // 3: hashicorp.consul.dataplane.DataplaneFeatureSupport + (*GetSupportedDataplaneFeaturesResponse)(nil), // 4: hashicorp.consul.dataplane.GetSupportedDataplaneFeaturesResponse + (*GetEnvoyBootstrapParamsRequest)(nil), // 5: hashicorp.consul.dataplane.GetEnvoyBootstrapParamsRequest + (*GetEnvoyBootstrapParamsResponse)(nil), // 6: hashicorp.consul.dataplane.GetEnvoyBootstrapParamsResponse (*structpb.Struct)(nil), // 7: google.protobuf.Struct } var file_proto_public_pbdataplane_dataplane_proto_depIdxs = []int32{ - 0, // 0: dataplane.DataplaneFeatureSupport.feature_name:type_name -> dataplane.DataplaneFeatures - 3, // 1: dataplane.GetSupportedDataplaneFeaturesResponse.supported_dataplane_features:type_name -> dataplane.DataplaneFeatureSupport - 1, // 2: dataplane.GetEnvoyBootstrapParamsResponse.service_kind:type_name -> dataplane.ServiceKind - 7, // 3: dataplane.GetEnvoyBootstrapParamsResponse.config:type_name -> google.protobuf.Struct - 2, // 4: dataplane.DataplaneService.GetSupportedDataplaneFeatures:input_type -> dataplane.GetSupportedDataplaneFeaturesRequest - 5, // 5: dataplane.DataplaneService.GetEnvoyBootstrapParams:input_type -> dataplane.GetEnvoyBootstrapParamsRequest - 4, // 6: dataplane.DataplaneService.GetSupportedDataplaneFeatures:output_type -> dataplane.GetSupportedDataplaneFeaturesResponse - 6, // 7: dataplane.DataplaneService.GetEnvoyBootstrapParams:output_type -> dataplane.GetEnvoyBootstrapParamsResponse + 0, // 0: hashicorp.consul.dataplane.DataplaneFeatureSupport.feature_name:type_name -> hashicorp.consul.dataplane.DataplaneFeatures + 3, // 1: hashicorp.consul.dataplane.GetSupportedDataplaneFeaturesResponse.supported_dataplane_features:type_name -> hashicorp.consul.dataplane.DataplaneFeatureSupport + 1, // 2: hashicorp.consul.dataplane.GetEnvoyBootstrapParamsResponse.service_kind:type_name -> hashicorp.consul.dataplane.ServiceKind + 7, // 3: hashicorp.consul.dataplane.GetEnvoyBootstrapParamsResponse.config:type_name -> google.protobuf.Struct + 2, // 4: hashicorp.consul.dataplane.DataplaneService.GetSupportedDataplaneFeatures:input_type -> hashicorp.consul.dataplane.GetSupportedDataplaneFeaturesRequest + 5, // 5: 
hashicorp.consul.dataplane.DataplaneService.GetEnvoyBootstrapParams:input_type -> hashicorp.consul.dataplane.GetEnvoyBootstrapParamsRequest + 4, // 6: hashicorp.consul.dataplane.DataplaneService.GetSupportedDataplaneFeatures:output_type -> hashicorp.consul.dataplane.GetSupportedDataplaneFeaturesResponse + 6, // 7: hashicorp.consul.dataplane.DataplaneService.GetEnvoyBootstrapParams:output_type -> hashicorp.consul.dataplane.GetEnvoyBootstrapParamsResponse 6, // [6:8] is the sub-list for method output_type 4, // [4:6] is the sub-list for method input_type 4, // [4:4] is the sub-list for extension type_name diff --git a/proto-public/pbdataplane/dataplane.proto b/proto-public/pbdataplane/dataplane.proto index f3ac2c2ba..0502dcd70 100644 --- a/proto-public/pbdataplane/dataplane.proto +++ b/proto-public/pbdataplane/dataplane.proto @@ -2,7 +2,7 @@ syntax = "proto3"; -package dataplane; +package hashicorp.consul.dataplane; import "google/protobuf/struct.proto"; diff --git a/proto-public/pbdataplane/dataplane_grpc.pb.go b/proto-public/pbdataplane/dataplane_grpc.pb.go index 56ceed000..353326d7a 100644 --- a/proto-public/pbdataplane/dataplane_grpc.pb.go +++ b/proto-public/pbdataplane/dataplane_grpc.pb.go @@ -36,7 +36,7 @@ func NewDataplaneServiceClient(cc grpc.ClientConnInterface) DataplaneServiceClie func (c *dataplaneServiceClient) GetSupportedDataplaneFeatures(ctx context.Context, in *GetSupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*GetSupportedDataplaneFeaturesResponse, error) { out := new(GetSupportedDataplaneFeaturesResponse) - err := c.cc.Invoke(ctx, "/dataplane.DataplaneService/GetSupportedDataplaneFeatures", in, out, opts...) + err := c.cc.Invoke(ctx, "/hashicorp.consul.dataplane.DataplaneService/GetSupportedDataplaneFeatures", in, out, opts...) if err != nil { return nil, err } @@ -45,7 +45,7 @@ func (c *dataplaneServiceClient) GetSupportedDataplaneFeatures(ctx context.Conte func (c *dataplaneServiceClient) GetEnvoyBootstrapParams(ctx context.Context, in *GetEnvoyBootstrapParamsRequest, opts ...grpc.CallOption) (*GetEnvoyBootstrapParamsResponse, error) { out := new(GetEnvoyBootstrapParamsResponse) - err := c.cc.Invoke(ctx, "/dataplane.DataplaneService/GetEnvoyBootstrapParams", in, out, opts...) + err := c.cc.Invoke(ctx, "/hashicorp.consul.dataplane.DataplaneService/GetEnvoyBootstrapParams", in, out, opts...) 
if err != nil { return nil, err } @@ -92,7 +92,7 @@ func _DataplaneService_GetSupportedDataplaneFeatures_Handler(srv interface{}, ct } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dataplane.DataplaneService/GetSupportedDataplaneFeatures", + FullMethod: "/hashicorp.consul.dataplane.DataplaneService/GetSupportedDataplaneFeatures", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DataplaneServiceServer).GetSupportedDataplaneFeatures(ctx, req.(*GetSupportedDataplaneFeaturesRequest)) @@ -110,7 +110,7 @@ func _DataplaneService_GetEnvoyBootstrapParams_Handler(srv interface{}, ctx cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dataplane.DataplaneService/GetEnvoyBootstrapParams", + FullMethod: "/hashicorp.consul.dataplane.DataplaneService/GetEnvoyBootstrapParams", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DataplaneServiceServer).GetEnvoyBootstrapParams(ctx, req.(*GetEnvoyBootstrapParamsRequest)) @@ -122,7 +122,7 @@ func _DataplaneService_GetEnvoyBootstrapParams_Handler(srv interface{}, ctx cont // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var DataplaneService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "dataplane.DataplaneService", + ServiceName: "hashicorp.consul.dataplane.DataplaneService", HandlerType: (*DataplaneServiceServer)(nil), Methods: []grpc.MethodDesc{ { diff --git a/proto-public/pbserverdiscovery/serverdiscovery.pb.go b/proto-public/pbserverdiscovery/serverdiscovery.pb.go index b9db1db2e..050158d38 100644 --- a/proto-public/pbserverdiscovery/serverdiscovery.pb.go +++ b/proto-public/pbserverdiscovery/serverdiscovery.pb.go @@ -192,41 +192,50 @@ var file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDesc = []byte{ 0x0a, 0x34, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, - 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x22, 0x27, 0x0a, 0x13, 0x57, 0x61, 0x74, 0x63, 0x68, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x77, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x77, 0x61, 0x6e, - 0x22, 0x49, 0x0a, 0x14, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x22, 0x4c, 0x0a, 0x06, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x79, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x44, 0x69, 
0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x0c, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x57, 0x61, 0x74, 0x63, - 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x30, 0x01, 0x42, 0xc3, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x42, 0x14, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, 0xaa, 0x02, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0xca, 0x02, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0xe2, 0x02, 0x1b, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5c, 0x47, 0x50, 0x42, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x22, 0x27, 0x0a, 0x13, 0x57, 0x61, 0x74, 0x63, + 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x77, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x77, 0x61, + 0x6e, 0x22, 0x5a, 0x0a, 0x14, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x22, 0x4c, 0x0a, + 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x9c, 0x01, 0x0a, 0x16, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x44, 
0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x0c, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x9a, 0x02, 0x0a, 0x24, 0x63, + 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x42, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x53, 0xaa, 0x02, 0x20, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0xca, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0xe2, 0x02, 0x2c, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, + 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x22, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, + 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -243,14 +252,14 @@ func file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescGZIP() []b var file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_proto_public_pbserverdiscovery_serverdiscovery_proto_goTypes = []interface{}{ - (*WatchServersRequest)(nil), // 0: serverdiscovery.WatchServersRequest - (*WatchServersResponse)(nil), // 1: serverdiscovery.WatchServersResponse - (*Server)(nil), // 2: serverdiscovery.Server + (*WatchServersRequest)(nil), // 0: hashicorp.consul.serverdiscovery.WatchServersRequest + (*WatchServersResponse)(nil), // 1: hashicorp.consul.serverdiscovery.WatchServersResponse + (*Server)(nil), // 2: hashicorp.consul.serverdiscovery.Server } var file_proto_public_pbserverdiscovery_serverdiscovery_proto_depIdxs = []int32{ - 2, // 0: 
serverdiscovery.WatchServersResponse.servers:type_name -> serverdiscovery.Server - 0, // 1: serverdiscovery.ServerDiscoveryService.WatchServers:input_type -> serverdiscovery.WatchServersRequest - 1, // 2: serverdiscovery.ServerDiscoveryService.WatchServers:output_type -> serverdiscovery.WatchServersResponse + 2, // 0: hashicorp.consul.serverdiscovery.WatchServersResponse.servers:type_name -> hashicorp.consul.serverdiscovery.Server + 0, // 1: hashicorp.consul.serverdiscovery.ServerDiscoveryService.WatchServers:input_type -> hashicorp.consul.serverdiscovery.WatchServersRequest + 1, // 2: hashicorp.consul.serverdiscovery.ServerDiscoveryService.WatchServers:output_type -> hashicorp.consul.serverdiscovery.WatchServersResponse 2, // [2:3] is the sub-list for method output_type 1, // [1:2] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name diff --git a/proto-public/pbserverdiscovery/serverdiscovery.proto b/proto-public/pbserverdiscovery/serverdiscovery.proto index 4af307e2f..d6df10bc1 100644 --- a/proto-public/pbserverdiscovery/serverdiscovery.proto +++ b/proto-public/pbserverdiscovery/serverdiscovery.proto @@ -3,7 +3,7 @@ syntax = "proto3"; -package serverdiscovery; +package hashicorp.consul.serverdiscovery; service ServerDiscoveryService { // WatchServers will stream back sets of ready servers as they change such as diff --git a/proto-public/pbserverdiscovery/serverdiscovery_grpc.pb.go b/proto-public/pbserverdiscovery/serverdiscovery_grpc.pb.go index 57718577d..bb75ee84e 100644 --- a/proto-public/pbserverdiscovery/serverdiscovery_grpc.pb.go +++ b/proto-public/pbserverdiscovery/serverdiscovery_grpc.pb.go @@ -38,7 +38,7 @@ func NewServerDiscoveryServiceClient(cc grpc.ClientConnInterface) ServerDiscover } func (c *serverDiscoveryServiceClient) WatchServers(ctx context.Context, in *WatchServersRequest, opts ...grpc.CallOption) (ServerDiscoveryService_WatchServersClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerDiscoveryService_ServiceDesc.Streams[0], "/serverdiscovery.ServerDiscoveryService/WatchServers", opts...) + stream, err := c.cc.NewStream(ctx, &ServerDiscoveryService_ServiceDesc.Streams[0], "/hashicorp.consul.serverdiscovery.ServerDiscoveryService/WatchServers", opts...) 
if err != nil { return nil, err } @@ -124,7 +124,7 @@ func (x *serverDiscoveryServiceWatchServersServer) Send(m *WatchServersResponse) // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var ServerDiscoveryService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "serverdiscovery.ServerDiscoveryService", + ServiceName: "hashicorp.consul.serverdiscovery.ServerDiscoveryService", HandlerType: (*ServerDiscoveryServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ From 7bd55578cc442e93f4ff9be42d8d473616241010 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Fri, 22 Jul 2022 12:05:08 -0700 Subject: [PATCH 067/107] peering: emit exported services count metric (#13811) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/consul/leader_peering.go | 67 +++++++++ agent/consul/leader_peering_test.go | 134 +++++++++++++++++- agent/consul/server.go | 1 + .../services/peerstream/stream_resources.go | 4 +- agent/setup.go | 3 +- logging/names.go | 1 + 6 files changed, 206 insertions(+), 4 deletions(-) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index 28a8397df..3288a141a 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -8,6 +8,8 @@ import ( "fmt" "time" + "github.com/armon/go-metrics" + "github.com/armon/go-metrics/prometheus" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" @@ -27,8 +29,72 @@ import ( "github.com/hashicorp/consul/proto/pbpeerstream" ) +var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"} +var LeaderPeeringMetrics = []prometheus.GaugeDefinition{ + { + Name: leaderExportedServicesCountKey, + Help: "A gauge that tracks how many services are exported for the peering. " + + "The labels are \"peering\" and, for enterprise, \"partition\". " + + "We emit this metric every 9 seconds", + }, +} + func (s *Server) startPeeringStreamSync(ctx context.Context) { s.leaderRoutineManager.Start(ctx, peeringStreamsRoutineName, s.runPeeringSync) + s.leaderRoutineManager.Start(ctx, peeringStreamsMetricsRoutineName, s.runPeeringMetrics) +} + +func (s *Server) runPeeringMetrics(ctx context.Context) error { + ticker := time.NewTicker(s.config.MetricsReportingInterval) + defer ticker.Stop() + + logger := s.logger.Named(logging.PeeringMetrics) + defaultMetrics := metrics.Default + + for { + select { + case <-ctx.Done(): + logger.Info("stopping peering metrics") + + // "Zero-out" the metric on exit so that when prometheus scrapes this + // metric from a non-leader, it does not get a stale value. 
+ metrics.SetGauge(leaderExportedServicesCountKey, float32(0)) + return nil + case <-ticker.C: + if err := s.emitPeeringMetricsOnce(logger, defaultMetrics()); err != nil { + s.logger.Error("error emitting peering stream metrics", "error", err) + } + } + } +} + +func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metrics.Metrics) error { + _, peers, err := s.fsm.State().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier)) + if err != nil { + return err + } + + for _, peer := range peers { + status, found := s.peerStreamServer.StreamStatus(peer.ID) + if !found { + logger.Trace("did not find status for", "peer_name", peer.Name) + continue + } + + esc := status.GetExportedServicesCount() + part := peer.Partition + labels := []metrics.Label{ + {Name: "peer_name", Value: peer.Name}, + {Name: "peer_id", Value: peer.ID}, + } + if part != "" { + labels = append(labels, metrics.Label{Name: "partition", Value: part}) + } + + metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels) + } + + return nil } func (s *Server) runPeeringSync(ctx context.Context) error { @@ -51,6 +117,7 @@ func (s *Server) runPeeringSync(ctx context.Context) error { func (s *Server) stopPeeringStreamSync() { // will be a no-op when not started s.leaderRoutineManager.Stop(peeringStreamsRoutineName) + s.leaderRoutineManager.Stop(peeringStreamsMetricsRoutineName) } // syncPeeringsAndBlock is a long-running goroutine that is responsible for watching diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index c3196a54e..06cbda43d 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -4,10 +4,12 @@ import ( "context" "encoding/base64" "encoding/json" + "fmt" "io/ioutil" "testing" "time" + "github.com/armon/go-metrics" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -615,7 +617,7 @@ func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastId return lastIdx } -// TODO(peering): once we move away from leader only request for PeeringList, move this test to consul/server_test maybe +// TODO(peering): once we move away from keeping state in stream tracker only on leaders, move this test to consul/server_test maybe func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -904,3 +906,133 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) { }) } } + +// TODO(peering): once we move away from keeping state in stream tracker only on leaders, move this test to consul/server_test maybe +func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + var ( + s2PeerID1 = generateUUID() + s2PeerID2 = generateUUID() + testContextTimeout = 60 * time.Second + lastIdx = uint64(0) + ) + + // TODO(peering): Configure with TLS + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := 
pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + // Bring up s2 and store s1's token so that it attempts to dial. + _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s2.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate exporting services in the tracker + { + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1. + p := &pbpeering.Peering{ + ID: s2PeerID1, + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + lastIdx++ + require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, p)) + + p2 := &pbpeering.Peering{ + ID: s2PeerID2, + Name: "my-peer-s3", + PeerID: token.PeerID, // doesn't much matter what these values are + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p2.ShouldDial()) + lastIdx++ + require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, p2)) + + // connect the stream + mst1, err := s2.peeringServer.Tracker.Connected(s2PeerID1) + require.NoError(t, err) + + // mimic tracking exported services + mst1.TrackExportedService(structs.ServiceName{Name: "a-service"}) + mst1.TrackExportedService(structs.ServiceName{Name: "b-service"}) + mst1.TrackExportedService(structs.ServiceName{Name: "c-service"}) + + // connect the stream + mst2, err := s2.peeringServer.Tracker.Connected(s2PeerID2) + require.NoError(t, err) + + // mimic tracking exported services + mst2.TrackExportedService(structs.ServiceName{Name: "d-service"}) + mst2.TrackExportedService(structs.ServiceName{Name: "e-service"}) + } + + // set up a metrics sink + sink := metrics.NewInmemSink(testContextTimeout, testContextTimeout) + cfg := metrics.DefaultConfig("us-west") + cfg.EnableHostname = false + met, err := metrics.New(cfg, sink) + require.NoError(t, err) + + errM := s2.emitPeeringMetricsOnce(s2.logger, met) + require.NoError(t, errM) + + retry.Run(t, func(r *retry.R) { + intervals := sink.Data() + require.Len(r, intervals, 1) + intv := intervals[0] + + // the keys for a Gauge value look like: {serviceName}.{prefix}.{key_name};{label=value};... 
+ keyMetric1 := fmt.Sprintf("us-west.consul.peering.exported_services;peer_name=my-peer-s1;peer_id=%s", s2PeerID1) + metric1, ok := intv.Gauges[keyMetric1] + require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric1)) + + require.Equal(r, float32(3), metric1.Value) // for a, b, c services + + keyMetric2 := fmt.Sprintf("us-west.consul.peering.exported_services;peer_name=my-peer-s3;peer_id=%s", s2PeerID2) + metric2, ok := intv.Gauges[keyMetric2] + require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2)) + + require.Equal(r, float32(2), metric2.Value) // for d, e services + }) +} diff --git a/agent/consul/server.go b/agent/consul/server.go index d4753bb3f..3c240c5f7 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -127,6 +127,7 @@ const ( virtualIPCheckRoutineName = "virtual IP version check" peeringStreamsRoutineName = "streaming peering resources" peeringDeletionRoutineName = "peering deferred deletion" + peeringStreamsMetricsRoutineName = "metrics for streaming peering resources" ) var ( diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 3d10cdfa0..5702f0e13 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -539,8 +539,8 @@ func getTrustDomain(store StateStore, logger hclog.Logger) (string, error) { return connect.SpiffeIDSigningForCluster(cfg.ClusterID).Host(), nil } -func (s *Server) StreamStatus(peer string) (resp Status, found bool) { - return s.Tracker.StreamStatus(peer) +func (s *Server) StreamStatus(peerID string) (resp Status, found bool) { + return s.Tracker.StreamStatus(peerID) } // ConnectedStreams returns a map of connected stream IDs to the corresponding channel for tearing them down. 
diff --git a/agent/setup.go b/agent/setup.go index 9ac506ab6..c32960518 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -231,7 +231,8 @@ func getPrometheusDefs(cfg lib.TelemetryConfig, isServer bool) ([]prometheus.Gau if isServer { gauges = append(gauges, consul.AutopilotGauges, - consul.LeaderCertExpirationGauges) + consul.LeaderCertExpirationGauges, + consul.LeaderPeeringMetrics) } // Flatten definitions diff --git a/logging/names.go b/logging/names.go index 5fd904c7f..7f5e2bf60 100644 --- a/logging/names.go +++ b/logging/names.go @@ -51,6 +51,7 @@ const ( Snapshot string = "snapshot" Partition string = "partition" Peering string = "peering" + PeeringMetrics string = "peering_metrics" TerminatingGateway string = "terminating_gateway" TLSUtil string = "tlsutil" Transaction string = "txn" From 12858f4f90b825f5cabf0e69406c52741bcd79cf Mon Sep 17 00:00:00 2001 From: NicoletaPopoviciu <87660255+NicoletaPopoviciu@users.noreply.github.com> Date: Fri, 22 Jul 2022 16:26:31 -0400 Subject: [PATCH 068/107] docs: Updates k8s annotation docs (#13809) * Updates k8s annotation docs Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: David Yu --- website/content/docs/k8s/annotations-and-labels.mdx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/website/content/docs/k8s/annotations-and-labels.mdx b/website/content/docs/k8s/annotations-and-labels.mdx index 2eba477f0..d67aa7297 100644 --- a/website/content/docs/k8s/annotations-and-labels.mdx +++ b/website/content/docs/k8s/annotations-and-labels.mdx @@ -233,6 +233,16 @@ The following Kubernetes resource annotations could be used on a pod to control - `consul.hashicorp.com/service-metrics-port` - Set the port where the Connect service exposes metrics. - `consul.hashicorp.com/service-metrics-path` - Set the path where the Connect service exposes metrics. - `consul.hashicorp.com/connect-inject-mount-volume` - Comma separated list of container names to mount the connect-inject volume into. The volume will be mounted at `/consul/connect-inject`. The connect-inject volume contains Consul internals data needed by the other sidecar containers, for example the `consul` binary, and the Pod's Consul ACL token. This data can be valuable for advanced use-cases, such as making requests to the Consul API from within application containers. +- `consul.hashicorp.com/consul-sidecar-user-volume` - JSON objects as specified by the [Volume pod spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core), that define volumes to add to the Envoy sidecar. + ```yaml + annotations: + "consul.hashicorp.com/consul-sidecar-user-volume": "[{\"name\": \"secrets-data\", \"hostPath\": "[{\"path\": \"/mnt/secrets-path\"}]"}]" + ``` +- `consul.hashicorp.com/consul-sidecar-user-volume-mount` - JSON objects as specified by the [Volume mount pod spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core), that define volumeMounts to add to the Envoy sidecar. + ```yaml + annotations: + "consul.hashicorp.com/consul-sidecar-user-volume-mount": "[{\"name\": \"secrets-store-mount\", \"mountPath\": \"/mnt/secrets-store\"}]" + ``` ## Labels From 922592d6bb0670c8913d874bb5eb406aacf88fd9 Mon Sep 17 00:00:00 2001 From: Freddy Date: Fri, 22 Jul 2022 14:42:23 -0600 Subject: [PATCH 069/107] [OSS] Add new peering ACL rule (#13848) This commit adds a new ACL rule named "peering" to authorize actions taken against peering-related endpoints. 
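As a rough illustration (not part of this patch), a policy using the new rule could look like
the following sketch, reusing the HCL rule grammar exercised by the policy tests below:

```
# Sketch only: grants read-only access to all peerings in the partition.
# Because an explicit "peering" rule is present, the "operator" rule is
# not consulted for peering authorization.
peering  = "read"
operator = "deny"
```

With a policy like this, read-only peering endpoints are authorized while state-changing
peering endpoints are denied.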
The "peering" rule has several key properties: - It is scoped to a partition, and MUST be defined in the default namespace. - Its access level must be "read', "write", or "deny". - Granting an access level will apply to all peerings. This ACL rule cannot be used to selective grant access to some peerings but not others. - If the peering rule is not specified, we fall back to the "operator" rule and then the default ACL rule. --- acl/acl_test.go | 344 +++++++++++++++++++++++++++++++++ acl/authorizer.go | 14 +- acl/authorizer_test.go | 26 ++- acl/chained_authorizer.go | 16 ++ acl/chained_authorizer_test.go | 16 ++ acl/policy.go | 7 + acl/policy_authorizer.go | 31 +++ acl/policy_authorizer_test.go | 2 + acl/policy_merger.go | 15 +- acl/policy_test.go | 122 ++++++++---- acl/static_authorizer.go | 14 ++ agent/acl_endpoint_test.go | 18 ++ agent/structs/acl.go | 1 + 13 files changed, 574 insertions(+), 52 deletions(-) diff --git a/acl/acl_test.go b/acl/acl_test.go index 3ce0fa59b..fae37e5a6 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -27,6 +27,7 @@ func legacyPolicy(policy *Policy) *Policy { Keyring: policy.Keyring, Operator: policy.Operator, Mesh: policy.Mesh, + Peering: policy.Peering, }, } } @@ -117,6 +118,14 @@ func checkAllowMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx * require.Equal(t, Allow, authz.MeshWrite(entCtx)) } +func checkAllowPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.PeeringRead(entCtx)) +} + +func checkAllowPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.PeeringWrite(entCtx)) +} + func checkAllowOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Allow, authz.OperatorRead(entCtx)) } @@ -241,6 +250,14 @@ func checkDenyMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *A require.Equal(t, Deny, authz.MeshWrite(entCtx)) } +func checkDenyPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Deny, authz.PeeringRead(entCtx)) +} + +func checkDenyPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Deny, authz.PeeringWrite(entCtx)) +} + func checkDenyOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Deny, authz.OperatorRead(entCtx)) } @@ -365,6 +382,14 @@ func checkDefaultMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx require.Equal(t, Default, authz.MeshWrite(entCtx)) } +func checkDefaultPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.PeeringRead(entCtx)) +} + +func checkDefaultPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.PeeringWrite(entCtx)) +} + func checkDefaultOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Default, authz.OperatorRead(entCtx)) } @@ -446,6 +471,8 @@ func TestACL(t *testing.T) { {name: "DenyNodeWrite", check: checkDenyNodeWrite}, {name: "DenyMeshRead", check: checkDenyMeshRead}, {name: "DenyMeshWrite", check: checkDenyMeshWrite}, + {name: "DenyPeeringRead", check: checkDenyPeeringRead}, + {name: "DenyPeeringWrite", check: checkDenyPeeringWrite}, {name: "DenyOperatorRead", check: checkDenyOperatorRead}, {name: "DenyOperatorWrite", check: 
checkDenyOperatorWrite}, {name: "DenyPreparedQueryRead", check: checkDenyPreparedQueryRead}, @@ -480,6 +507,8 @@ func TestACL(t *testing.T) { {name: "AllowNodeWrite", check: checkAllowNodeWrite}, {name: "AllowMeshRead", check: checkAllowMeshRead}, {name: "AllowMeshWrite", check: checkAllowMeshWrite}, + {name: "AllowPeeringRead", check: checkAllowPeeringRead}, + {name: "AllowPeeringWrite", check: checkAllowPeeringWrite}, {name: "AllowOperatorRead", check: checkAllowOperatorRead}, {name: "AllowOperatorWrite", check: checkAllowOperatorWrite}, {name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead}, @@ -514,6 +543,8 @@ func TestACL(t *testing.T) { {name: "AllowNodeWrite", check: checkAllowNodeWrite}, {name: "AllowMeshRead", check: checkAllowMeshRead}, {name: "AllowMeshWrite", check: checkAllowMeshWrite}, + {name: "AllowPeeringRead", check: checkAllowPeeringRead}, + {name: "AllowPeeringWrite", check: checkAllowPeeringWrite}, {name: "AllowOperatorRead", check: checkAllowOperatorRead}, {name: "AllowOperatorWrite", check: checkAllowOperatorWrite}, {name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead}, @@ -1217,6 +1248,319 @@ func TestACL(t *testing.T) { {name: "WriteAllowed", check: checkAllowMeshWrite}, }, }, + { + name: "PeeringDefaultAllowPolicyDeny", + defaultPolicy: AllowAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Peering: PolicyDeny, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + name: "PeeringDefaultAllowPolicyRead", + defaultPolicy: AllowAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Peering: PolicyRead, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + name: "PeeringDefaultAllowPolicyWrite", + defaultPolicy: AllowAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Peering: PolicyWrite, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, + { + name: "PeeringDefaultAllowPolicyNone", + defaultPolicy: AllowAll(), + policyStack: []*Policy{ + {}, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, + { + name: "PeeringDefaultDenyPolicyDeny", + defaultPolicy: DenyAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Peering: PolicyDeny, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + name: "PeeringDefaultDenyPolicyRead", + defaultPolicy: DenyAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Peering: PolicyRead, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + name: "PeeringDefaultDenyPolicyWrite", + defaultPolicy: DenyAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Peering: PolicyWrite, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, + { + name: "PeeringDefaultDenyPolicyNone", + defaultPolicy: DenyAll(), + policyStack: []*Policy{ + {}, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: 
checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:deny, p:deny = deny + name: "PeeringOperatorDenyPolicyDeny", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyDeny, + Peering: PolicyDeny, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:read, p:deny = deny + name: "PeeringOperatorReadPolicyDeny", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyRead, + Peering: PolicyDeny, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:write, p:deny = deny + name: "PeeringOperatorWritePolicyDeny", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyWrite, + Peering: PolicyDeny, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:deny, p:read = read + name: "PeeringOperatorDenyPolicyRead", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyDeny, + Peering: PolicyRead, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:read, p:read = read + name: "PeeringOperatorReadPolicyRead", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyRead, + Peering: PolicyRead, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:write, p:read = read + name: "PeeringOperatorWritePolicyRead", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyWrite, + Peering: PolicyRead, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:deny, p:write = write + name: "PeeringOperatorDenyPolicyWrite", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyDeny, + Peering: PolicyWrite, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, + { + // o:read, p:write = write + name: "PeeringOperatorReadPolicyWrite", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyRead, + Peering: PolicyWrite, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, + { + // o:write, p:write = write + name: "PeeringOperatorWritePolicyWrite", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyWrite, + Peering: PolicyWrite, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, + { + // o:deny, p: = deny + name: "PeeringOperatorDenyPolicyNone", + defaultPolicy: nil, // test both + policyStack: 
[]*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyDeny, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadDenied", check: checkDenyPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:read, p: = read + name: "PeeringOperatorReadPolicyNone", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyRead, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteDenied", check: checkDenyPeeringWrite}, + }, + }, + { + // o:write, p: = write + name: "PeeringOperatorWritePolicyNone", + defaultPolicy: nil, // test both + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Operator: PolicyWrite, + }, + }, + }, + checks: []aclCheck{ + {name: "ReadAllowed", check: checkAllowPeeringRead}, + {name: "WriteAllowed", check: checkAllowPeeringWrite}, + }, + }, { name: "OperatorDefaultAllowPolicyDeny", defaultPolicy: AllowAll(), diff --git a/acl/authorizer.go b/acl/authorizer.go index fe28c05ed..b0e5326bc 100644 --- a/acl/authorizer.go +++ b/acl/authorizer.go @@ -114,6 +114,14 @@ type Authorizer interface { // functions can be used. MeshWrite(*AuthorizerContext) EnforcementDecision + // PeeringRead determines if the read-only Consul peering functions + // can be used. + PeeringRead(*AuthorizerContext) EnforcementDecision + + // PeeringWrite determines if the stage-changing Consul peering + // functions can be used. + PeeringWrite(*AuthorizerContext) EnforcementDecision + // NodeRead checks for permission to read (discover) a given node. NodeRead(string, *AuthorizerContext) EnforcementDecision @@ -542,12 +550,11 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx return authz.SessionWrite(segment, ctx), nil } case ResourcePeering: - // TODO (peering) switch this over to using PeeringRead & PeeringWrite methods once implemented switch lowerAccess { case "read": - return authz.OperatorRead(ctx), nil + return authz.PeeringRead(ctx), nil case "write": - return authz.OperatorWrite(ctx), nil + return authz.PeeringWrite(ctx), nil } default: if processed, decision, err := enforceEnterprise(authz, rsc, segment, lowerAccess, ctx); processed { @@ -561,6 +568,7 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx // NewAuthorizerFromRules is a convenience function to invoke NewPolicyFromSource followed by NewPolicyAuthorizer with // the parse policy. +// TODO(ACL-Legacy-Compat): remove syntax arg after removing SyntaxLegacy func NewAuthorizerFromRules(rules string, syntax SyntaxVersion, conf *Config, meta *EnterprisePolicyMeta) (Authorizer, error) { policy, err := NewPolicyFromSource(rules, syntax, conf, meta) if err != nil { diff --git a/acl/authorizer_test.go b/acl/authorizer_test.go index f8aeda3d4..03c0517a1 100644 --- a/acl/authorizer_test.go +++ b/acl/authorizer_test.go @@ -139,6 +139,20 @@ func (m *mockAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision { return ret.Get(0).(EnforcementDecision) } +// PeeringRead determines if the read-only Consul peering functions +// can be used. +func (m *mockAuthorizer) PeeringRead(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// PeeringWrite determines if the state-changing Consul peering +// functions can be used. 
+func (m *mockAuthorizer) PeeringWrite(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + // OperatorRead determines if the read-only Consul operator functions // can be used. ret := m.Called(segment, ctx) func (m *mockAuthorizer) OperatorRead(ctx *AuthorizerContext) EnforcementDecision { @@ -463,29 +477,25 @@ func TestACL_Enforce(t *testing.T) { err: "Invalid access level", }, { - // TODO (peering) Update to use PeeringRead - method: "OperatorRead", + method: "PeeringRead", resource: ResourcePeering, access: "read", ret: Allow, }, { - // TODO (peering) Update to use PeeringRead - method: "OperatorRead", + method: "PeeringRead", resource: ResourcePeering, access: "read", ret: Deny, }, { - // TODO (peering) Update to use PeeringWrite - method: "OperatorWrite", + method: "PeeringWrite", resource: ResourcePeering, access: "write", ret: Allow, }, { - // TODO (peering) Update to use PeeringWrite - method: "OperatorWrite", + method: "PeeringWrite", resource: ResourcePeering, access: "write", ret: Deny, diff --git a/acl/chained_authorizer.go b/acl/chained_authorizer.go index 77df69a3e..cf81cc4b1 100644 --- a/acl/chained_authorizer.go +++ b/acl/chained_authorizer.go @@ -161,6 +161,22 @@ func (c *ChainedAuthorizer) MeshWrite(entCtx *AuthorizerContext) EnforcementDeci }) } +// PeeringRead determines if the read-only Consul peering functions +// can be used. +func (c *ChainedAuthorizer) PeeringRead(entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.PeeringRead(entCtx) + }) +} + +// PeeringWrite determines if the state-changing Consul peering +// functions can be used. +func (c *ChainedAuthorizer) PeeringWrite(entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.PeeringWrite(entCtx) + }) +} + // NodeRead checks for permission to read (discover) a given node. 
func (c *ChainedAuthorizer) NodeRead(node string, entCtx *AuthorizerContext) EnforcementDecision { return c.executeChain(func(authz Authorizer) EnforcementDecision { diff --git a/acl/chained_authorizer_test.go b/acl/chained_authorizer_test.go index 5f33d0166..284a1bd0e 100644 --- a/acl/chained_authorizer_test.go +++ b/acl/chained_authorizer_test.go @@ -68,6 +68,12 @@ func (authz testAuthorizer) MeshRead(*AuthorizerContext) EnforcementDecision { func (authz testAuthorizer) MeshWrite(*AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } +func (authz testAuthorizer) PeeringRead(*AuthorizerContext) EnforcementDecision { + return EnforcementDecision(authz) +} +func (authz testAuthorizer) PeeringWrite(*AuthorizerContext) EnforcementDecision { + return EnforcementDecision(authz) +} func (authz testAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } @@ -128,6 +134,8 @@ func TestChainedAuthorizer(t *testing.T) { checkDenyNodeWrite(t, authz, "foo", nil) checkDenyMeshRead(t, authz, "foo", nil) checkDenyMeshWrite(t, authz, "foo", nil) + checkDenyPeeringRead(t, authz, "foo", nil) + checkDenyPeeringWrite(t, authz, "foo", nil) checkDenyOperatorRead(t, authz, "foo", nil) checkDenyOperatorWrite(t, authz, "foo", nil) checkDenyPreparedQueryRead(t, authz, "foo", nil) @@ -160,6 +168,8 @@ func TestChainedAuthorizer(t *testing.T) { checkDenyNodeWrite(t, authz, "foo", nil) checkDenyMeshRead(t, authz, "foo", nil) checkDenyMeshWrite(t, authz, "foo", nil) + checkDenyPeeringRead(t, authz, "foo", nil) + checkDenyPeeringWrite(t, authz, "foo", nil) checkDenyOperatorRead(t, authz, "foo", nil) checkDenyOperatorWrite(t, authz, "foo", nil) checkDenyPreparedQueryRead(t, authz, "foo", nil) @@ -192,6 +202,8 @@ func TestChainedAuthorizer(t *testing.T) { checkAllowNodeWrite(t, authz, "foo", nil) checkAllowMeshRead(t, authz, "foo", nil) checkAllowMeshWrite(t, authz, "foo", nil) + checkAllowPeeringRead(t, authz, "foo", nil) + checkAllowPeeringWrite(t, authz, "foo", nil) checkAllowOperatorRead(t, authz, "foo", nil) checkAllowOperatorWrite(t, authz, "foo", nil) checkAllowPreparedQueryRead(t, authz, "foo", nil) @@ -224,6 +236,8 @@ func TestChainedAuthorizer(t *testing.T) { checkDenyNodeWrite(t, authz, "foo", nil) checkDenyMeshRead(t, authz, "foo", nil) checkDenyMeshWrite(t, authz, "foo", nil) + checkDenyPeeringRead(t, authz, "foo", nil) + checkDenyPeeringWrite(t, authz, "foo", nil) checkDenyOperatorRead(t, authz, "foo", nil) checkDenyOperatorWrite(t, authz, "foo", nil) checkDenyPreparedQueryRead(t, authz, "foo", nil) @@ -254,6 +268,8 @@ func TestChainedAuthorizer(t *testing.T) { checkAllowNodeWrite(t, authz, "foo", nil) checkAllowMeshRead(t, authz, "foo", nil) checkAllowMeshWrite(t, authz, "foo", nil) + checkAllowPeeringRead(t, authz, "foo", nil) + checkAllowPeeringWrite(t, authz, "foo", nil) checkAllowOperatorRead(t, authz, "foo", nil) checkAllowOperatorWrite(t, authz, "foo", nil) checkAllowPreparedQueryRead(t, authz, "foo", nil) diff --git a/acl/policy.go b/acl/policy.go index d4ebd5976..59c3df8b3 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -85,6 +85,7 @@ type PolicyRules struct { Keyring string `hcl:"keyring"` Operator string `hcl:"operator"` Mesh string `hcl:"mesh"` + Peering string `hcl:"peering"` } // Policy is used to represent the policy specified by an ACL configuration. 
@@ -289,6 +290,10 @@ func (pr *PolicyRules) Validate(conf *Config) error { return fmt.Errorf("Invalid mesh policy: %#v", pr.Mesh) } + // Validate the peering policy - this one is allowed to be empty + if pr.Peering != "" && !isPolicyValid(pr.Peering, false) { + return fmt.Errorf("Invalid peering policy: %#v", pr.Peering) + } return nil } @@ -309,6 +314,7 @@ func parseCurrent(rules string, conf *Config, meta *EnterprisePolicyMeta) (*Poli return p, nil } +// TODO(ACL-Legacy-Compat): remove in phase 2 func parseLegacy(rules string, conf *Config) (*Policy, error) { p := &Policy{} @@ -436,6 +442,7 @@ func NewPolicyFromSource(rules string, syntax SyntaxVersion, conf *Config, meta var policy *Policy var err error switch syntax { + // TODO(ACL-Legacy-Compat): remove and remove as argument from function case SyntaxLegacy: policy, err = parseLegacy(rules, conf) case SyntaxCurrent: diff --git a/acl/policy_authorizer.go b/acl/policy_authorizer.go index 3b79a6316..4a24b6bd0 100644 --- a/acl/policy_authorizer.go +++ b/acl/policy_authorizer.go @@ -43,6 +43,9 @@ type policyAuthorizer struct { // meshRule contains the mesh policies. meshRule *policyAuthorizerRule + // peeringRule contains the peering policies. + peeringRule *policyAuthorizerRule + // embedded enterprise policy authorizer enterprisePolicyAuthorizer } @@ -322,6 +325,15 @@ func (p *policyAuthorizer) loadRules(policy *PolicyRules) error { p.meshRule = &policyAuthorizerRule{access: access} } + // Load the peering policy + if policy.Peering != "" { + access, err := AccessLevelFromString(policy.Peering) + if err != nil { + return err + } + p.peeringRule = &policyAuthorizerRule{access: access} + } + return nil } @@ -692,6 +704,25 @@ func (p *policyAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision return p.OperatorWrite(ctx) } +// PeeringRead determines if the read-only peering functions are allowed. +func (p *policyAuthorizer) PeeringRead(ctx *AuthorizerContext) EnforcementDecision { + if p.peeringRule != nil { + return enforce(p.peeringRule.access, AccessRead) + } + // default to OperatorRead access + return p.OperatorRead(ctx) +} + +// PeeringWrite determines if the state-changing peering functions are +// allowed. +func (p *policyAuthorizer) PeeringWrite(ctx *AuthorizerContext) EnforcementDecision { + if p.peeringRule != nil { + return enforce(p.peeringRule.access, AccessWrite) + } + // default to OperatorWrite access + return p.OperatorWrite(ctx) +} + // OperatorRead determines if the read-only operator functions are allowed. 
func (p *policyAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision { if p.operatorRule != nil { diff --git a/acl/policy_authorizer_test.go b/acl/policy_authorizer_test.go index d2f69a4eb..57f41993a 100644 --- a/acl/policy_authorizer_test.go +++ b/acl/policy_authorizer_test.go @@ -50,6 +50,8 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "DefaultNodeWrite", prefix: "foo", check: checkDefaultNodeWrite}, {name: "DefaultMeshRead", prefix: "foo", check: checkDefaultMeshRead}, {name: "DefaultMeshWrite", prefix: "foo", check: checkDefaultMeshWrite}, + {name: "DefaultPeeringRead", prefix: "foo", check: checkDefaultPeeringRead}, + {name: "DefaultPeeringWrite", prefix: "foo", check: checkDefaultPeeringWrite}, {name: "DefaultOperatorRead", prefix: "foo", check: checkDefaultOperatorRead}, {name: "DefaultOperatorWrite", prefix: "foo", check: checkDefaultOperatorWrite}, {name: "DefaultPreparedQueryRead", prefix: "foo", check: checkDefaultPreparedQueryRead}, diff --git a/acl/policy_merger.go b/acl/policy_merger.go index d4a454bc1..3a617aa1e 100644 --- a/acl/policy_merger.go +++ b/acl/policy_merger.go @@ -10,6 +10,7 @@ type policyRulesMergeContext struct { keyRules map[string]*KeyRule keyPrefixRules map[string]*KeyRule meshRule string + peeringRule string nodeRules map[string]*NodeRule nodePrefixRules map[string]*NodeRule operatorRule string @@ -33,6 +34,7 @@ func (p *policyRulesMergeContext) init() { p.keyRules = make(map[string]*KeyRule) p.keyPrefixRules = make(map[string]*KeyRule) p.meshRule = "" + p.peeringRule = "" p.nodeRules = make(map[string]*NodeRule) p.nodePrefixRules = make(map[string]*NodeRule) p.operatorRule = "" @@ -119,10 +121,6 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) { } } - if takesPrecedenceOver(policy.Mesh, p.meshRule) { - p.meshRule = policy.Mesh - } - for _, np := range policy.Nodes { update := true if permission, found := p.nodeRules[np.Name]; found { @@ -145,6 +143,14 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) { } } + if takesPrecedenceOver(policy.Mesh, p.meshRule) { + p.meshRule = policy.Mesh + } + + if takesPrecedenceOver(policy.Peering, p.peeringRule) { + p.peeringRule = policy.Peering + } + if takesPrecedenceOver(policy.Operator, p.operatorRule) { p.operatorRule = policy.Operator } @@ -235,6 +241,7 @@ func (p *policyRulesMergeContext) fill(merged *PolicyRules) { merged.Keyring = p.keyringRule merged.Operator = p.operatorRule merged.Mesh = p.meshRule + merged.Peering = p.peeringRule // All the for loop appends are ugly but Go doesn't have a way to get // a slice of all values within a map so this is necessary diff --git a/acl/policy_test.go b/acl/policy_test.go index 5416eb557..362451e98 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -65,6 +65,7 @@ func TestPolicySourceParse(t *testing.T) { } operator = "deny" mesh = "deny" + peering = "deny" service_prefix "" { policy = "write" } @@ -147,6 +148,7 @@ func TestPolicySourceParse(t *testing.T) { }, "operator": "deny", "mesh": "deny", + "peering": "deny", "service_prefix": { "": { "policy": "write" @@ -253,6 +255,7 @@ func TestPolicySourceParse(t *testing.T) { }, Operator: PolicyDeny, Mesh: PolicyDeny, + Peering: PolicyDeny, PreparedQueryPrefixes: []*PreparedQueryRule{ { Prefix: "", @@ -743,6 +746,13 @@ func TestPolicySourceParse(t *testing.T) { RulesJSON: `{ "mesh": "nope" }`, Err: "Invalid mesh policy", }, + { + Name: "Bad Policy - Peering", + Syntax: SyntaxCurrent, + Rules: `peering = "nope"`, + RulesJSON: `{ "peering": "nope" }`, + Err: "Invalid peering 
policy", + }, { Name: "Keyring Empty", Syntax: SyntaxCurrent, @@ -764,6 +774,13 @@ func TestPolicySourceParse(t *testing.T) { RulesJSON: `{ "mesh": "" }`, Expected: &Policy{PolicyRules: PolicyRules{Mesh: ""}}, }, + { + Name: "Peering Empty", + Syntax: SyntaxCurrent, + Rules: `peering = ""`, + RulesJSON: `{ "peering": "" }`, + Expected: &Policy{PolicyRules: PolicyRules{Peering: ""}}, + }, } for _, tc := range cases { @@ -1453,66 +1470,90 @@ func TestMergePolicies(t *testing.T) { { name: "Write Precedence", input: []*Policy{ - {PolicyRules: PolicyRules{ - ACL: PolicyRead, - Keyring: PolicyRead, - Operator: PolicyRead, - Mesh: PolicyRead, - }}, - {PolicyRules: PolicyRules{ + { + PolicyRules: PolicyRules{ + ACL: PolicyRead, + Keyring: PolicyRead, + Operator: PolicyRead, + Mesh: PolicyRead, + Peering: PolicyRead, + }, + }, + { + PolicyRules: PolicyRules{ + ACL: PolicyWrite, + Keyring: PolicyWrite, + Operator: PolicyWrite, + Mesh: PolicyWrite, + Peering: PolicyWrite, + }, + }, + }, + expected: &Policy{ + PolicyRules: PolicyRules{ ACL: PolicyWrite, Keyring: PolicyWrite, Operator: PolicyWrite, Mesh: PolicyWrite, - }}, + Peering: PolicyWrite, + }, }, - expected: &Policy{PolicyRules: PolicyRules{ - ACL: PolicyWrite, - Keyring: PolicyWrite, - Operator: PolicyWrite, - Mesh: PolicyWrite, - }}, }, { name: "Deny Precedence", input: []*Policy{ - {PolicyRules: PolicyRules{ - ACL: PolicyWrite, - Keyring: PolicyWrite, - Operator: PolicyWrite, - Mesh: PolicyWrite, - }}, - {PolicyRules: PolicyRules{ + { + PolicyRules: PolicyRules{ + ACL: PolicyWrite, + Keyring: PolicyWrite, + Operator: PolicyWrite, + Mesh: PolicyWrite, + Peering: PolicyWrite, + }, + }, + { + PolicyRules: PolicyRules{ + ACL: PolicyDeny, + Keyring: PolicyDeny, + Operator: PolicyDeny, + Mesh: PolicyDeny, + Peering: PolicyDeny, + }, + }, + }, + expected: &Policy{ + PolicyRules: PolicyRules{ ACL: PolicyDeny, Keyring: PolicyDeny, Operator: PolicyDeny, Mesh: PolicyDeny, - }}, + Peering: PolicyDeny, + }, }, - expected: &Policy{PolicyRules: PolicyRules{ - ACL: PolicyDeny, - Keyring: PolicyDeny, - Operator: PolicyDeny, - Mesh: PolicyDeny, - }}, }, { name: "Read Precedence", input: []*Policy{ - {PolicyRules: PolicyRules{ + { + PolicyRules: PolicyRules{ + ACL: PolicyRead, + Keyring: PolicyRead, + Operator: PolicyRead, + Mesh: PolicyRead, + Peering: PolicyRead, + }, + }, + {}, + }, + expected: &Policy{ + PolicyRules: PolicyRules{ ACL: PolicyRead, Keyring: PolicyRead, Operator: PolicyRead, Mesh: PolicyRead, - }}, - {}, + Peering: PolicyRead, + }, }, - expected: &Policy{PolicyRules: PolicyRules{ - ACL: PolicyRead, - Keyring: PolicyRead, - Operator: PolicyRead, - Mesh: PolicyRead, - }}, }, } @@ -1524,6 +1565,7 @@ func TestMergePolicies(t *testing.T) { require.Equal(t, exp.Keyring, act.Keyring) require.Equal(t, exp.Operator, act.Operator) require.Equal(t, exp.Mesh, act.Mesh) + require.Equal(t, exp.Peering, act.Peering) require.ElementsMatch(t, exp.Agents, act.Agents) require.ElementsMatch(t, exp.AgentPrefixes, act.AgentPrefixes) require.ElementsMatch(t, exp.Events, act.Events) @@ -1597,6 +1639,9 @@ operator = "write" # comment mesh = "write" + +# comment +peering = "write" ` expected := ` @@ -1652,6 +1697,9 @@ operator = "write" # comment mesh = "write" + +# comment +peering = "write" ` output, err := TranslateLegacyRules([]byte(input)) diff --git a/acl/static_authorizer.go b/acl/static_authorizer.go index 951b026f3..07cc84511 100644 --- a/acl/static_authorizer.go +++ b/acl/static_authorizer.go @@ -170,6 +170,20 @@ func (s *staticAuthorizer) 
MeshWrite(*AuthorizerContext) EnforcementDecision { return Deny } +func (s *staticAuthorizer) PeeringRead(*AuthorizerContext) EnforcementDecision { + if s.defaultAllow { + return Allow + } + return Deny +} + +func (s *staticAuthorizer) PeeringWrite(*AuthorizerContext) EnforcementDecision { + if s.defaultAllow { + return Allow + } + return Deny +} + func (s *staticAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision { if s.defaultAllow { return Allow diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 60a512ef4..5cffef6ee 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -2044,6 +2044,14 @@ func TestACL_Authorize(t *testing.T) { Resource: "mesh", Access: "write", }, + { + Resource: "peering", + Access: "read", + }, + { + Resource: "peering", + Access: "write", + }, { Resource: "query", Segment: "foo", @@ -2186,6 +2194,14 @@ func TestACL_Authorize(t *testing.T) { Resource: "mesh", Access: "write", }, + { + Resource: "peering", + Access: "read", + }, + { + Resource: "peering", + Access: "write", + }, { Resource: "query", Segment: "foo", @@ -2238,6 +2254,8 @@ func TestACL_Authorize(t *testing.T) { true, // operator:write true, // mesh:read true, // mesh:write + true, // peering:read + true, // peering:write false, // query:read false, // query:write true, // service:read diff --git a/agent/structs/acl.go b/agent/structs/acl.go index 82d19b8ac..1fd3f1d93 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -60,6 +60,7 @@ node_prefix "" { } operator = "write" mesh = "write" +peering = "write" query_prefix "" { policy = "write" } From d21f793b74dda8f48dd34647d78350e13c94720e Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Fri, 22 Jul 2022 15:20:21 -0700 Subject: [PATCH 070/107] peering: add config to enable/disable peering (#13867) * peering: add config to enable/disable peering Add config: ``` peering { enabled = true } ``` Defaults to true. When disabled: 1. All peering RPC endpoints will return an error 2. Leader won't start its peering establishment goroutines 3. Leader won't start its peering deletion goroutines --- agent/agent.go | 2 + agent/config/builder.go | 1 + agent/config/config.go | 5 ++ agent/config/default.go | 3 + agent/config/runtime.go | 8 ++ agent/config/runtime_test.go | 11 +++ .../TestRuntimeConfig_Sanitize.golden | 1 + agent/config/testdata/full-config.hcl | 3 + agent/config/testdata/full-config.json | 3 + agent/consul/config.go | 5 ++ agent/consul/leader.go | 8 +- agent/consul/leader_peering_test.go | 86 +++++++++++++++++++ agent/consul/server.go | 1 + agent/rpc/peering/service.go | 35 ++++++++ agent/rpc/peering/service_test.go | 63 ++++++++++++++ docs/config/checklist-adding-config-fields.md | 2 + .../docs/agent/config/config-files.mdx | 9 ++ 17 files changed, 244 insertions(+), 2 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index a7c89a727..197434e77 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1341,6 +1341,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co // function does not drift. 
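To make the plumbing in this hunk easier to follow, here is a minimal sketch of how the agent-level switch reaches the server config. The `buildServerConfig` wrapper is hypothetical and exists only for illustration; the `PeeringEnabled` fields and `consul.DefaultConfig` are the pieces this commit actually touches.

```go
package agent

import (
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/consul"
)

// buildServerConfig is a hypothetical stand-in for newConsulConfig, shown only
// to make the data flow explicit: the runtime value parsed from
// `peering { enabled = ... }` is copied onto the server config, which the
// leader routines and RPC endpoints consult later in this patch series.
func buildServerConfig(runtimeCfg *config.RuntimeConfig) *consul.Config {
	cfg := consul.DefaultConfig() // PeeringEnabled defaults to true
	cfg.Datacenter = runtimeCfg.Datacenter
	cfg.PeeringEnabled = runtimeCfg.PeeringEnabled
	return cfg
}
```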
cfg.SerfLANConfig = consul.CloneSerfLANConfig(cfg.SerfLANConfig) + cfg.PeeringEnabled = runtimeCfg.PeeringEnabled + enterpriseConsulConfig(cfg, runtimeCfg) return cfg, nil } diff --git a/agent/config/builder.go b/agent/config/builder.go index f855aae51..70c5d044c 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1014,6 +1014,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) { NodeMeta: c.NodeMeta, NodeName: b.nodeName(c.NodeName), ReadReplica: boolVal(c.ReadReplica), + PeeringEnabled: boolVal(c.Peering.Enabled), PidFile: stringVal(c.PidFile), PrimaryDatacenter: primaryDatacenter, PrimaryGateways: b.expandAllOptionalAddrs("primary_gateways", c.PrimaryGateways), diff --git a/agent/config/config.go b/agent/config/config.go index c4f752a82..23e7550aa 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -197,6 +197,7 @@ type Config struct { NodeID *string `mapstructure:"node_id"` NodeMeta map[string]string `mapstructure:"node_meta"` NodeName *string `mapstructure:"node_name"` + Peering Peering `mapstructure:"peering"` Performance Performance `mapstructure:"performance"` PidFile *string `mapstructure:"pid_file"` Ports Ports `mapstructure:"ports"` @@ -887,3 +888,7 @@ type TLS struct { // config merging logic. GRPCModifiedByDeprecatedConfig *struct{} `mapstructure:"-"` } + +type Peering struct { + Enabled *bool `mapstructure:"enabled"` +} diff --git a/agent/config/default.go b/agent/config/default.go index 951d9f126..d0cc2865d 100644 --- a/agent/config/default.go +++ b/agent/config/default.go @@ -104,6 +104,9 @@ func DefaultSource() Source { kv_max_value_size = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + ` txn_max_req_len = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + ` } + peering = { + enabled = true + } performance = { leave_drain_time = "5s" raft_multiplier = ` + strconv.Itoa(int(consul.DefaultRaftMultiplier)) + ` diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 2ae9888ae..db46c2184 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -810,6 +810,14 @@ type RuntimeConfig struct { // flag: -non-voting-server ReadReplica bool + // PeeringEnabled enables cluster peering. This setting only applies for servers. + // When disabled, all peering RPC endpoints will return errors, + // peering requests from other clusters will receive errors, and any peerings already stored in this server's + // state will be ignored. + // + // hcl: peering { enabled = (true|false) } + PeeringEnabled bool + // PidFile is the file to store our PID in. // // hcl: pid_file = string diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 0963ec07f..b05b31491 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -5548,6 +5548,16 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { "tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. 
via ports.https)", }, }) + run(t, testCase{ + desc: "peering.enabled defaults to true", + args: []string{ + `-data-dir=` + dataDir, + }, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.PeeringEnabled = true + }, + }) } func (tc testCase) run(format string, dataDir string) func(t *testing.T) { @@ -5955,6 +5965,7 @@ func TestLoad_FullConfig(t *testing.T) { NodeMeta: map[string]string{"5mgGQMBk": "mJLtVMSG", "A7ynFMJB": "0Nx6RGab"}, NodeName: "otlLxGaI", ReadReplica: true, + PeeringEnabled: true, PidFile: "43xN80Km", PrimaryGateways: []string{"aej8eeZo", "roh2KahS"}, PrimaryGatewaysInterval: 18866 * time.Second, diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 25fbba0c0..b5d72f864 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -235,6 +235,7 @@ "NodeID": "", "NodeMeta": {}, "NodeName": "", + "PeeringEnabled": false, "PidFile": "", "PrimaryDatacenter": "", "PrimaryGateways": [ diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index bb544b54a..ed8203296 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -305,6 +305,9 @@ node_meta { node_name = "otlLxGaI" non_voting_server = true partition = "" +peering { + enabled = true +} performance { leave_drain_time = "8265s" raft_multiplier = 5 diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 36f52e681..8294a27b7 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -305,6 +305,9 @@ "node_name": "otlLxGaI", "non_voting_server": true, "partition": "", + "peering": { + "enabled": true + }, "performance": { "leave_drain_time": "8265s", "raft_multiplier": 5, diff --git a/agent/consul/config.go b/agent/consul/config.go index 50235c681..469ccc919 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -396,6 +396,9 @@ type Config struct { RaftBoltDBConfig RaftBoltDBConfig + // PeeringEnabled enables cluster peering. 
+ PeeringEnabled bool + // Embedded Consul Enterprise specific configuration *EnterpriseConfig } @@ -512,6 +515,8 @@ func DefaultConfig() *Config { DefaultQueryTime: 300 * time.Second, MaxQueryTime: 600 * time.Second, + PeeringEnabled: true, + EnterpriseConfig: DefaultEnterpriseConfig(), } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index eb197deb3..389b79056 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -315,7 +315,9 @@ func (s *Server) establishLeadership(ctx context.Context) error { s.startFederationStateAntiEntropy(ctx) - s.startPeeringStreamSync(ctx) + if s.config.PeeringEnabled { + s.startPeeringStreamSync(ctx) + } s.startDeferredDeletion(ctx) @@ -758,7 +760,9 @@ func (s *Server) stopACLReplication() { } func (s *Server) startDeferredDeletion(ctx context.Context) { - s.startPeeringDeferredDeletion(ctx) + if s.config.PeeringEnabled { + s.startPeeringDeferredDeletion(ctx) + } s.startTenancyDeferredDeletion(ctx) } diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 06cbda43d..feaf5be02 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -1036,3 +1036,89 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { require.Equal(r, float32(2), metric2.Value) // for d, e services }) } + +// Test that the leader doesn't start its peering deletion routing when +// peering is disabled. +func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + c.PeeringEnabled = false + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + var ( + peerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d" + peerName = "my-peer-s2" + lastIdx = uint64(0) + ) + + // Simulate a peering initiation event by writing a peering to the state store. + lastIdx++ + require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{ + ID: peerID, + Name: peerName, + })) + + // Mark the peering for deletion to trigger the termination sequence. + lastIdx++ + require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{ + ID: peerID, + Name: peerName, + DeletedAt: structs.TimeToProto(time.Now()), + })) + + // The leader routine shouldn't be running so the peering should never get deleted. + require.Never(t, func() bool { + _, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{ + Value: peerName, + }) + if err != nil { + t.Logf("unexpected err: %s", err) + return true + } + if peering == nil { + return true + } + return false + }, 7*time.Second, 1*time.Second, "peering should not have been deleted") +} + +// Test that the leader doesn't start its peering establishment routine +// when peering is disabled. +func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + c.PeeringEnabled = false + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + var ( + peerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d" + peerName = "my-peer-s2" + lastIdx = uint64(0) + ) + + // Simulate a peering initiation event by writing a peering to the state store. 
+ require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{ + ID: peerID, + Name: peerName, + PeerServerAddresses: []string{"1.2.3.4"}, + })) + + require.Never(t, func() bool { + _, found := s1.peerStreamTracker.StreamStatus(peerID) + return found + }, 7*time.Second, 1*time.Second, "peering should not have been established") +} diff --git a/agent/consul/server.go b/agent/consul/server.go index 3c240c5f7..a14253d80 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -794,6 +794,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler }, Datacenter: config.Datacenter, ConnectEnabled: config.ConnectEnabled, + PeeringEnabled: config.PeeringEnabled, }) s.peeringServer = p diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 1d0d219f6..a8ca3b199 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -56,6 +56,7 @@ type Config struct { ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) Datacenter string ConnectEnabled bool + PeeringEnabled bool } func NewServer(cfg Config) *Server { @@ -139,6 +140,8 @@ type Store interface { TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error) } +var peeringNotEnabledErr = grpcstatus.Error(codes.FailedPrecondition, "peering must be enabled to use this endpoint") + // GenerateToken implements the PeeringService RPC method to generate a // peering token which is the initial step in establishing a peering relationship // with other Consul clusters. @@ -146,6 +149,10 @@ func (s *Server) GenerateToken( ctx context.Context, req *pbpeering.GenerateTokenRequest, ) (*pbpeering.GenerateTokenResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } @@ -251,6 +258,10 @@ func (s *Server) Establish( ctx context.Context, req *pbpeering.EstablishRequest, ) (*pbpeering.EstablishResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + // validate prior to forwarding to the leader, this saves a network hop if err := dns.ValidateLabel(req.PeerName); err != nil { return nil, fmt.Errorf("%s is not a valid peer name: %w", req.PeerName, err) @@ -316,6 +327,10 @@ func (s *Server) Establish( } func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequest) (*pbpeering.PeeringReadResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } @@ -350,6 +365,10 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ } func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequest) (*pbpeering.PeeringListResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } @@ -413,6 +432,10 @@ func (s *Server) reconcilePeering(peering *pbpeering.Peering) *pbpeering.Peering // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. 
// Consider removing if we can find another way to populate state store in peering_endpoint_test.go func (s *Server) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRequest) (*pbpeering.PeeringWriteResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Peering.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } @@ -449,6 +472,10 @@ func (s *Server) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRe } func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDeleteRequest) (*pbpeering.PeeringDeleteResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } @@ -505,6 +532,10 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete } func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundleReadRequest) (*pbpeering.TrustBundleReadResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } @@ -540,6 +571,10 @@ func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundle // TODO(peering): rename rpc & request/response to drop the "service" part func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.TrustBundleListByServiceRequest) (*pbpeering.TrustBundleListByServiceResponse, error) { + if !s.Config.PeeringEnabled { + return nil, peeringNotEnabledErr + } + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 939a304d2..c7e37c91d 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -15,6 +15,8 @@ import ( "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" gogrpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul" @@ -529,6 +531,67 @@ func TestPeeringService_TrustBundleListByService(t *testing.T) { require.Equal(t, []string{"foo-root-1"}, resp.Bundles[1].RootPEMs) } +// Test RPC endpoint responses when peering is disabled. They should all return an error. +func TestPeeringService_PeeringDisabled(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(c *consul.Config) { c.PeeringEnabled = false }) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + // assertFailedResponse is a helper function that checks the error from a gRPC + // response is what we expect when peering is disabled. + assertFailedResponse := func(t *testing.T, err error) { + actErr, ok := grpcstatus.FromError(err) + require.True(t, ok) + require.Equal(t, codes.FailedPrecondition, actErr.Code()) + require.Equal(t, "peering must be enabled to use this endpoint", actErr.Message()) + } + + // Test all the endpoints. 
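Since the subtests that follow only assert the shape of the error, here is a hedged sketch of how a caller outside the test suite could key off the same gate. The helper name is invented for illustration; the status code and message are the ones returned by the endpoints above.

```go
package peering_test

import (
	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"
)

// isPeeringDisabledErr is an illustrative helper (not part of this patch)
// showing how a gRPC caller can recognize the gate added above: every peering
// endpoint returns codes.FailedPrecondition with a fixed message when
// `peering { enabled = false }` is set on the servers.
func isPeeringDisabledErr(err error) bool {
	st, ok := grpcstatus.FromError(err)
	return ok && st.Code() == codes.FailedPrecondition &&
		st.Message() == "peering must be enabled to use this endpoint"
}
```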
+ + t.Run("PeeringWrite", func(t *testing.T) { + _, err := client.PeeringWrite(ctx, &pbpeering.PeeringWriteRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("PeeringRead", func(t *testing.T) { + _, err := client.PeeringRead(ctx, &pbpeering.PeeringReadRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("PeeringDelete", func(t *testing.T) { + _, err := client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("PeeringList", func(t *testing.T) { + _, err := client.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("Establish", func(t *testing.T) { + _, err := client.Establish(ctx, &pbpeering.EstablishRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("GenerateToken", func(t *testing.T) { + _, err := client.GenerateToken(ctx, &pbpeering.GenerateTokenRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("TrustBundleRead", func(t *testing.T) { + _, err := client.TrustBundleRead(ctx, &pbpeering.TrustBundleReadRequest{}) + assertFailedResponse(t, err) + }) + + t.Run("TrustBundleListByService", func(t *testing.T) { + _, err := client.TrustBundleListByService(ctx, &pbpeering.TrustBundleListByServiceRequest{}) + assertFailedResponse(t, err) + }) +} + // newTestServer is copied from partition/service_test.go, with the addition of certs/cas. // TODO(peering): these are endpoint tests and should live in the agent/consul // package. Instead, these can be written around a mock client (see testing.go) diff --git a/docs/config/checklist-adding-config-fields.md b/docs/config/checklist-adding-config-fields.md index e17139411..7a47eb841 100644 --- a/docs/config/checklist-adding-config-fields.md +++ b/docs/config/checklist-adding-config-fields.md @@ -45,6 +45,8 @@ There are four specific cases covered with increasing complexity: - [ ] Add that to `DefaultSource` in `agent/config/defaults.go`. - [ ] Add a test case to the table test `TestLoad_IntegrationWithFlags` in `agent/config/runtime_test.go`. + - [ ] If the config needs to be defaulted for the test server used in unit tests, + also add it to `DefaultConfig()` in `agent/consul/defaults.go`. - [ ] **If** your config should take effect on a reload/HUP. - [ ] Add necessary code to to trigger a safe (locked or atomic) update to any state the feature needs changing. This needs to be added to one or diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index a8eaba6d5..6b902987e 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -551,6 +551,15 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `max_query_time` Equivalent to the [`-max-query-time` command-line flag](/docs/agent/config/cli-flags#_max_query_time). +- `peering` This object allows setting options for cluster peering. + + The following sub-keys are available: + + - `enabled` ((#peering_enabled)) (Defaults to `true`) Controls whether cluster peering is enabled. + Has no effect on Consul clients, only on Consul servers. When disabled, all peering APIs will return + an error, any peerings stored in Consul already will be ignored (but they will not be deleted), + all peering connections from other clusters will be rejected. This was added in Consul 1.13.0. + - `partition` - This flag is used to set the name of the admin partition the agent belongs to. An agent can only join and communicate with other agents within its admin partition. 
Review the From b60ebc022e30594f5b171c8d6edc14f2c44e7a92 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Fri, 22 Jul 2022 15:56:25 -0700 Subject: [PATCH 071/107] peering: use ShouldDial to validate peer role (#13823) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/rpc/peering/service.go | 76 +++++++++++++++++++++++++------ agent/rpc/peering/service_test.go | 75 ++++++++++++++++++++++++++++++ 2 files changed, 138 insertions(+), 13 deletions(-) diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index a8ca3b199..8cb78e33c 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -194,13 +194,22 @@ func (s *Server) GenerateToken( } } + peeringOrNil, err := s.getExistingPeering(req.PeerName, req.Partition) + if err != nil { + return nil, err + } + + // validate that this peer name is not being used as a dialer already + if err = validatePeer(peeringOrNil, false); err != nil { + return nil, err + } + canRetry := true RETRY_ONCE: id, err := s.getExistingOrCreateNewPeerID(req.PeerName, req.Partition) if err != nil { return nil, err } - writeReq := pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ ID: id, @@ -290,17 +299,32 @@ func (s *Server) Establish( defer metrics.MeasureSince([]string{"peering", "establish"}, time.Now()) + peeringOrNil, err := s.getExistingPeering(req.PeerName, req.Partition) + if err != nil { + return nil, err + } + + // validate that this peer name is not being used as an acceptor already + if err = validatePeer(peeringOrNil, true); err != nil { + return nil, err + } + + var id string + if peeringOrNil != nil { + id = peeringOrNil.ID + } else { + id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID) + if err != nil { + return nil, err + } + } + // convert ServiceAddress values to strings serverAddrs := make([]string, len(tok.ServerAddresses)) for i, addr := range tok.ServerAddresses { serverAddrs[i] = addr } - id, err := s.getExistingOrCreateNewPeerID(req.PeerName, req.Partition) - if err != nil { - return nil, err - } - // as soon as a peering is written with a list of ServerAddresses that is // non-empty, the leader routine will see the peering and attempt to // establish a connection with the remote peer. 
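A short sketch of the invariant this commit introduces, phrased as a hypothetical helper rather than the real validation (which lives in `validatePeer`, shown further down in this diff): a peering with stored server addresses acts as a dialer, one without acts as an acceptor, and a name cannot switch roles.

```go
package peering

import "github.com/hashicorp/consul/proto/pbpeering"

// describePeeringRole is illustrative only; it restates the rule that
// validatePeer enforces for an existing peering of the same name.
func describePeeringRole(p *pbpeering.Peering) string {
	switch {
	case p == nil:
		return "no existing peering: either GenerateToken or Establish may create it"
	case p.ShouldDial():
		return "dialer: Establish may be retried, GenerateToken is rejected"
	default:
		return "acceptor: GenerateToken may be retried, Establish is rejected"
	}
}
```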
@@ -622,16 +646,12 @@ func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.Tr } func (s *Server) getExistingOrCreateNewPeerID(peerName, partition string) (string, error) { - q := state.Query{ - Value: strings.ToLower(peerName), - EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(partition), - } - _, peering, err := s.Backend.Store().PeeringRead(nil, q) + peeringOrNil, err := s.getExistingPeering(peerName, partition) if err != nil { return "", err } - if peering != nil { - return peering.ID, nil + if peeringOrNil != nil { + return peeringOrNil.ID, nil } id, err := lib.GenerateUUID(s.Backend.CheckPeeringUUID) @@ -641,6 +661,36 @@ func (s *Server) getExistingOrCreateNewPeerID(peerName, partition string) (strin return id, nil } +func (s *Server) getExistingPeering(peerName, partition string) (*pbpeering.Peering, error) { + q := state.Query{ + Value: strings.ToLower(peerName), + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(partition), + } + _, peering, err := s.Backend.Store().PeeringRead(nil, q) + if err != nil { + return nil, err + } + + return peering, nil +} + +// validatePeer enforces the following rule for an existing peering: +// - if a peering already exists, it can only be used as an acceptor or dialer +// +// We define a DIALER as a peering that has server addresses (or a peering that is created via the Establish endpoint) +// Conversely, we define an ACCEPTOR as a peering that is created via the GenerateToken endpoint +func validatePeer(peering *pbpeering.Peering, allowedToDial bool) error { + if peering != nil && peering.ShouldDial() != allowedToDial { + if allowedToDial { + return fmt.Errorf("cannot create peering with name: %q; there is an existing peering expecting to be dialed", peering.Name) + } else { + return fmt.Errorf("cannot create peering with name: %q; there is already an established peering", peering.Name) + } + } + + return nil +} + func copyPeering(p *pbpeering.Peering) *pbpeering.Peering { var copyP pbpeering.Peering proto.Merge(©P, p) diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index c7e37c91d..47de64554 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -531,6 +531,81 @@ func TestPeeringService_TrustBundleListByService(t *testing.T) { require.Equal(t, []string{"foo-root-1"}, resp.Bundles[1].RootPEMs) } +func TestPeeringService_validatePeer(t *testing.T) { + dir := testutil.TempDir(t, "consul") + signer, _, _ := tlsutil.GeneratePrivateKey() + ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + cafile := path.Join(dir, "cacert.pem") + require.NoError(t, ioutil.WriteFile(cafile, []byte(ca), 0600)) + + s := newTestServer(t, func(c *consul.Config) { + c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.1" + c.TLSConfig.GRPC.CAFile = cafile + c.DataDir = dir + }) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + testutil.RunStep(t, "generate a token", func(t *testing.T) { + req := pbpeering.GenerateTokenRequest{PeerName: "peerB"} + resp, err := client.GenerateToken(ctx, &req) + require.NoError(t, err) + require.NotEmpty(t, resp) + }) + + testutil.RunStep(t, "generate a token with the same name", func(t *testing.T) { + req := pbpeering.GenerateTokenRequest{PeerName: "peerB"} + resp, err := client.GenerateToken(ctx, &req) + require.NoError(t, err) + require.NotEmpty(t, resp) + }) + + validToken := 
peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") + validTokenJSON, _ := json.Marshal(&validToken) + validTokenB64 := base64.StdEncoding.EncodeToString(validTokenJSON) + + testutil.RunStep(t, "send an establish request for a different peer name", func(t *testing.T) { + resp, err := client.Establish(ctx, &pbpeering.EstablishRequest{ + PeerName: "peer1-usw1", + PeeringToken: validTokenB64, + }) + require.NoError(t, err) + require.NotEmpty(t, resp) + }) + + testutil.RunStep(t, "send an establish request for a different peer name again", func(t *testing.T) { + resp, err := client.Establish(ctx, &pbpeering.EstablishRequest{ + PeerName: "peer1-usw1", + PeeringToken: validTokenB64, + }) + require.NoError(t, err) + require.NotEmpty(t, resp) + }) + + testutil.RunStep(t, "attempt to generate token with the same name used as dialer", func(t *testing.T) { + req := pbpeering.GenerateTokenRequest{PeerName: "peer1-usw1"} + resp, err := client.GenerateToken(ctx, &req) + + require.Error(t, err) + require.Contains(t, err.Error(), + "cannot create peering with name: \"peer1-usw1\"; there is already an established peering") + require.Nil(t, resp) + }) + + testutil.RunStep(t, "attempt to establish the with the same name used as acceptor", func(t *testing.T) { + resp, err := client.Establish(ctx, &pbpeering.EstablishRequest{ + PeerName: "peerB", + PeeringToken: validTokenB64, + }) + + require.Error(t, err) + require.Contains(t, err.Error(), + "cannot create peering with name: \"peerB\"; there is an existing peering expecting to be dialed") + require.Nil(t, resp) + }) +} + // Test RPC endpoint responses when peering is disabled. They should all return an error. func TestPeeringService_PeeringDisabled(t *testing.T) { // TODO(peering): see note on newTestServer, refactor to not use this From 75efc0649b2404b930ebcab596c34988a9f212c0 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 22 Jul 2022 17:29:38 -0700 Subject: [PATCH 072/107] Remove excess debug log from ingress upstream shutdown --- agent/proxycfg/ingress_gateway.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 3889bdba8..828229864 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -149,10 +149,6 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, for uid, cancelFn := range snap.IngressGateway.WatchedDiscoveryChains { if _, ok := watchedSvcs[uid]; !ok { for targetID, cancelUpstreamFn := range snap.IngressGateway.WatchedUpstreams[uid] { - s.logger.Debug("stopping watch of target", - "upstream", uid, - "target", targetID, - ) delete(snap.IngressGateway.WatchedUpstreams[uid], targetID) delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID) cancelUpstreamFn() From 5bbc0cc61531362f6430876f39111a5f881f6d7a Mon Sep 17 00:00:00 2001 From: freddygv Date: Tue, 12 Jul 2022 17:18:05 -0600 Subject: [PATCH 073/107] Add ACL enforcement to peering endpoints --- acl/authorizer.go | 18 + agent/cache-types/trust_bundle.go | 47 +- agent/cache-types/trust_bundle_test.go | 10 +- agent/cache-types/trust_bundles.go | 51 +- agent/cache-types/trust_bundles_test.go | 10 +- agent/consul/peering_backend.go | 6 + agent/consul/peering_backend_oss_test.go | 8 +- agent/consul/peering_backend_test.go | 40 +- agent/peering_endpoint.go | 73 +-- agent/proxycfg-glue/glue.go | 2 +- agent/proxycfg-glue/trust_bundle.go | 22 +- agent/proxycfg-glue/trust_bundle_test.go | 23 +- agent/proxycfg/connect_proxy.go | 30 
+- agent/proxycfg/data_sources.go | 5 +- agent/proxycfg/mesh_gateway.go | 13 +- agent/proxycfg/state_test.go | 20 +- agent/proxycfg/testing.go | 12 +- agent/rpc/peering/service.go | 192 +++++-- agent/rpc/peering/service_test.go | 623 ++++++++++++++++++++++- agent/rpc/peering/testing.go | 2 + api/peering.go | 10 +- api/peering_test.go | 114 ++++- proto/pbpeering/peering.gen.go | 8 - proto/pbpeering/peering.go | 151 ++---- proto/pbpeering/peering.pb.go | 440 ++++++---------- proto/pbpeering/peering.proto | 64 +-- proto/pbpeering/peering.rpcglue.pb.go | 224 -------- 27 files changed, 1300 insertions(+), 918 deletions(-) diff --git a/acl/authorizer.go b/acl/authorizer.go index b0e5326bc..6842283b1 100644 --- a/acl/authorizer.go +++ b/acl/authorizer.go @@ -335,6 +335,24 @@ func (a AllowAuthorizer) MeshWriteAllowed(ctx *AuthorizerContext) error { return nil } +// PeeringReadAllowed determines if the read-only Consul peering functions +// can be used. +func (a AllowAuthorizer) PeeringReadAllowed(ctx *AuthorizerContext) error { + if a.Authorizer.PeeringRead(ctx) != Allow { + return PermissionDeniedByACLUnnamed(a, ctx, ResourcePeering, AccessRead) + } + return nil +} + +// PeeringWriteAllowed determines if the state-changing Consul peering +// functions can be used. +func (a AllowAuthorizer) PeeringWriteAllowed(ctx *AuthorizerContext) error { + if a.Authorizer.PeeringWrite(ctx) != Allow { + return PermissionDeniedByACLUnnamed(a, ctx, ResourcePeering, AccessWrite) + } + return nil +} + // NodeReadAllowed checks for permission to read (discover) a given node. func (a AllowAuthorizer) NodeReadAllowed(name string, ctx *AuthorizerContext) error { if a.Authorizer.NodeRead(name, ctx) != Allow { diff --git a/agent/cache-types/trust_bundle.go b/agent/cache-types/trust_bundle.go index 16b8f204b..48dad6437 100644 --- a/agent/cache-types/trust_bundle.go +++ b/agent/cache-types/trust_bundle.go @@ -3,16 +3,53 @@ package cachetype import ( "context" "fmt" + "strconv" + "time" + "github.com/mitchellh/hashstructure" "google.golang.org/grpc" "github.com/hashicorp/consul/agent/cache" + external "github.com/hashicorp/consul/agent/grpc-external" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbpeering" ) // Recommended name for registration. const TrustBundleReadName = "peer-trust-bundle" +type TrustBundleReadRequest struct { + Request *pbpeering.TrustBundleReadRequest + structs.QueryOptions +} + +func (r *TrustBundleReadRequest) CacheInfo() cache.RequestInfo { + info := cache.RequestInfo{ + Token: r.Token, + Datacenter: "", + MinIndex: 0, + Timeout: 0, + MustRevalidate: false, + + // OPTIMIZE(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works. + // Using an exponential backoff when the result hasn't changed may be preferable. + MaxAge: 1 * time.Second, + } + + v, err := hashstructure.Hash([]interface{}{ + r.Request.Partition, + r.Request.Name, + }, nil) + if err == nil { + // If there is an error, we don't set the key. A blank key forces + // no cache for this request so the request is forwarded directly + // to the server. + info.Key = strconv.FormatUint(v, 10) + } + + return info +} + // TrustBundle supports fetching discovering service instances via prepared // queries. type TrustBundle struct { @@ -33,14 +70,20 @@ func (t *TrustBundle) Fetch(_ cache.FetchOptions, req cache.Request) (cache.Fetc // The request should be a TrustBundleReadRequest. 
// We do not need to make a copy of this request type like in other cache types // because the RequestInfo is synthetic. - reqReal, ok := req.(*pbpeering.TrustBundleReadRequest) + reqReal, ok := req.(*TrustBundleReadRequest) if !ok { return result, fmt.Errorf( "Internal cache failure: request wrong type: %T", req) } + // Always allow stale - there's no point in hitting leader if the request is + // going to be served from cache and end up arbitrarily stale anyway. This + // allows cached service-discover to automatically read scale across all + // servers too. + reqReal.QueryOptions.SetAllowStale(true) + // Fetch - reply, err := t.Client.TrustBundleRead(context.Background(), reqReal) + reply, err := t.Client.TrustBundleRead(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request) if err != nil { return result, err } diff --git a/agent/cache-types/trust_bundle_test.go b/agent/cache-types/trust_bundle_test.go index fa3d016a2..ee03838aa 100644 --- a/agent/cache-types/trust_bundle_test.go +++ b/agent/cache-types/trust_bundle_test.go @@ -33,8 +33,10 @@ func TestTrustBundle(t *testing.T) { Return(resp, nil) // Fetch and assert against the result. - result, err := typ.Fetch(cache.FetchOptions{}, &pbpeering.TrustBundleReadRequest{ - Name: "foo", + result, err := typ.Fetch(cache.FetchOptions{}, &TrustBundleReadRequest{ + Request: &pbpeering.TrustBundleReadRequest{ + Name: "foo", + }, }) require.NoError(t, err) require.Equal(t, cache.FetchResult{ @@ -82,7 +84,9 @@ func TestTrustBundle_MultipleUpdates(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) t.Cleanup(cancel) - err := c.Notify(ctx, TrustBundleReadName, &pbpeering.TrustBundleReadRequest{Name: "foo"}, "updates", ch) + err := c.Notify(ctx, TrustBundleReadName, &TrustBundleReadRequest{ + Request: &pbpeering.TrustBundleReadRequest{Name: "foo"}, + }, "updates", ch) require.NoError(t, err) i := uint64(1) diff --git a/agent/cache-types/trust_bundles.go b/agent/cache-types/trust_bundles.go index 5b4bbcc13..70c63cb4b 100644 --- a/agent/cache-types/trust_bundles.go +++ b/agent/cache-types/trust_bundles.go @@ -3,16 +3,55 @@ package cachetype import ( "context" "fmt" + "strconv" + "time" + "github.com/mitchellh/hashstructure" "google.golang.org/grpc" "github.com/hashicorp/consul/agent/cache" + external "github.com/hashicorp/consul/agent/grpc-external" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbpeering" ) // Recommended name for registration. const TrustBundleListName = "trust-bundles" +type TrustBundleListRequest struct { + Request *pbpeering.TrustBundleListByServiceRequest + structs.QueryOptions +} + +func (r *TrustBundleListRequest) CacheInfo() cache.RequestInfo { + info := cache.RequestInfo{ + Token: r.Token, + Datacenter: "", + MinIndex: 0, + Timeout: 0, + MustRevalidate: false, + + // OPTIMIZE(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works. + // Using an exponential backoff when the result hasn't changed may be preferable. + MaxAge: 1 * time.Second, + } + + v, err := hashstructure.Hash([]interface{}{ + r.Request.Partition, + r.Request.Namespace, + r.Request.ServiceName, + r.Request.Kind, + }, nil) + if err == nil { + // If there is an error, we don't set the key. A blank key forces + // no cache for this request so the request is forwarded directly + // to the server. 
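The same cache-key scheme appears in both new request types, so a condensed sketch may help; `exampleCacheKey` is a hypothetical helper mirroring the `CacheInfo` logic beside it, not an addition to the package.

```go
package cachetype

import (
	"strconv"

	"github.com/mitchellh/hashstructure"
)

// exampleCacheKey hashes only the fields that identify the result, so
// identical requests share one cache entry; a hashing error yields a blank
// key, which forces the request past the cache straight to the servers.
func exampleCacheKey(fields ...interface{}) string {
	v, err := hashstructure.Hash(fields, nil)
	if err != nil {
		return ""
	}
	return strconv.FormatUint(v, 10)
}
```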
+ info.Key = strconv.FormatUint(v, 10) + } + + return info +} + // TrustBundles supports fetching discovering service instances via prepared // queries. type TrustBundles struct { @@ -30,17 +69,23 @@ type TrustBundleLister interface { func (t *TrustBundles) Fetch(_ cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { var result cache.FetchResult - // The request should be a TrustBundleListByServiceRequest. + // The request should be a TrustBundleListRequest. // We do not need to make a copy of this request type like in other cache types // because the RequestInfo is synthetic. - reqReal, ok := req.(*pbpeering.TrustBundleListByServiceRequest) + reqReal, ok := req.(*TrustBundleListRequest) if !ok { return result, fmt.Errorf( "Internal cache failure: request wrong type: %T", req) } + // Always allow stale - there's no point in hitting leader if the request is + // going to be served from cache and end up arbitrarily stale anyway. This + // allows cached service-discover to automatically read scale across all + // servers too. + reqReal.QueryOptions.SetAllowStale(true) + // Fetch - reply, err := t.Client.TrustBundleListByService(context.Background(), reqReal) + reply, err := t.Client.TrustBundleListByService(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request) if err != nil { return result, err } diff --git a/agent/cache-types/trust_bundles_test.go b/agent/cache-types/trust_bundles_test.go index d5fbd6f50..09d8a80bc 100644 --- a/agent/cache-types/trust_bundles_test.go +++ b/agent/cache-types/trust_bundles_test.go @@ -36,8 +36,10 @@ func TestTrustBundles(t *testing.T) { Return(resp, nil) // Fetch and assert against the result. - result, err := typ.Fetch(cache.FetchOptions{}, &pbpeering.TrustBundleListByServiceRequest{ - ServiceName: "foo", + result, err := typ.Fetch(cache.FetchOptions{}, &TrustBundleListRequest{ + Request: &pbpeering.TrustBundleListByServiceRequest{ + ServiceName: "foo", + }, }) require.NoError(t, err) require.Equal(t, cache.FetchResult{ @@ -85,7 +87,9 @@ func TestTrustBundles_MultipleUpdates(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) t.Cleanup(cancel) - err := c.Notify(ctx, TrustBundleListName, &pbpeering.TrustBundleListByServiceRequest{ServiceName: "foo"}, "updates", ch) + err := c.Notify(ctx, TrustBundleListName, &TrustBundleListRequest{ + Request: &pbpeering.TrustBundleListByServiceRequest{ServiceName: "foo"}, + }, "updates", ch) require.NoError(t, err) i := uint64(1) diff --git a/agent/consul/peering_backend.go b/agent/consul/peering_backend.go index 589b4e95b..1ab1b5c95 100644 --- a/agent/consul/peering_backend.go +++ b/agent/consul/peering_backend.go @@ -7,6 +7,8 @@ import ( "strconv" "sync" + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/grpc-external/services/peerstream" "github.com/hashicorp/consul/agent/rpc/peering" @@ -160,3 +162,7 @@ func (b *PeeringBackend) CatalogDeregister(req *structs.DeregisterRequest) error _, err := b.srv.leaderRaftApply("Catalog.Deregister", structs.DeregisterRequestType, req) return err } + +func (b *PeeringBackend) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) { + return b.srv.ResolveTokenAndDefaultMeta(token, entMeta, authzCtx) +} diff --git a/agent/consul/peering_backend_oss_test.go b/agent/consul/peering_backend_oss_test.go index 5996690ea..3c120d26f 
100644 --- a/agent/consul/peering_backend_oss_test.go +++ b/agent/consul/peering_backend_oss_test.go @@ -42,8 +42,7 @@ func TestPeeringBackend_RejectsPartition(t *testing.T) { peeringClient := pbpeering.NewPeeringServiceClient(conn) req := pbpeering.GenerateTokenRequest{ - Datacenter: "dc1", - Partition: "test", + Partition: "test", } _, err = peeringClient.GenerateToken(ctx, &req) require.Error(t, err) @@ -77,9 +76,8 @@ func TestPeeringBackend_IgnoresDefaultPartition(t *testing.T) { peeringClient := pbpeering.NewPeeringServiceClient(conn) req := pbpeering.GenerateTokenRequest{ - Datacenter: "dc1", - PeerName: "my-peer", - Partition: "DeFaUlT", + PeerName: "my-peer", + Partition: "DeFaUlT", } _, err = peeringClient.GenerateToken(ctx, &req) require.NoError(t, err) diff --git a/agent/consul/peering_backend_test.go b/agent/consul/peering_backend_test.go index 6d6344a29..fc73ba53d 100644 --- a/agent/consul/peering_backend_test.go +++ b/agent/consul/peering_backend_test.go @@ -15,43 +15,6 @@ import ( "github.com/hashicorp/consul/testrpc" ) -func TestPeeringBackend_DoesNotForwardToDifferentDC(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - _, s1 := testServerDC(t, "dc1") - _, s2 := testServerDC(t, "dc2") - - joinWAN(t, s2, s1) - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s2.RPC, "dc2") - - // make a grpc client to dial s2 directly - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - t.Cleanup(cancel) - - conn, err := gogrpc.DialContext(ctx, s2.config.RPCAddr.String(), - gogrpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())), - gogrpc.WithInsecure(), - gogrpc.WithBlock()) - require.NoError(t, err) - t.Cleanup(func() { conn.Close() }) - - peeringClient := pbpeering.NewPeeringServiceClient(conn) - - // GenerateToken request should fail against dc1, because we are dialing dc2. The GenerateToken request should never be forwarded across datacenters. - req := pbpeering.GenerateTokenRequest{ - PeerName: "peer1-usw1", - Datacenter: "dc1", - } - _, err = peeringClient.GenerateToken(ctx, &req) - require.Error(t, err) - require.Contains(t, err.Error(), "requests to generate peering tokens cannot be forwarded to remote datacenters") -} - func TestPeeringBackend_ForwardToLeader(t *testing.T) { t.Parallel() @@ -86,8 +49,7 @@ func TestPeeringBackend_ForwardToLeader(t *testing.T) { testutil.RunStep(t, "forward a write", func(t *testing.T) { // Do the grpc Write call to server2 req := pbpeering.GenerateTokenRequest{ - Datacenter: "dc1", - PeerName: "foo", + PeerName: "foo", } _, err := peeringClient.GenerateToken(ctx, &req) require.NoError(t, err) diff --git a/agent/peering_endpoint.go b/agent/peering_endpoint.go index 0d120830e..6ef7167b2 100644 --- a/agent/peering_endpoint.go +++ b/agent/peering_endpoint.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/hashicorp/consul/acl" + external "github.com/hashicorp/consul/agent/grpc-external" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/proto/pbpeering" @@ -32,17 +33,20 @@ func (s *HTTPHandlers) PeeringEndpoint(resp http.ResponseWriter, req *http.Reque // peeringRead fetches a peering that matches the name and partition. 
// This assumes that the name and partition parameters are valid func (s *HTTPHandlers) peeringRead(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) { - args := pbpeering.PeeringReadRequest{ - Name: name, - Datacenter: s.agent.config.Datacenter, - } var entMeta acl.EnterpriseMeta if err := s.parseEntMetaPartition(req, &entMeta); err != nil { return nil, err } - args.Partition = entMeta.PartitionOrEmpty() + args := pbpeering.PeeringReadRequest{ + Name: name, + Partition: entMeta.PartitionOrEmpty(), + } - result, err := s.agent.rpcClientPeering.PeeringRead(req.Context(), &args) + var token string + s.parseToken(req, &token) + ctx := external.ContextWithToken(req.Context(), token) + + result, err := s.agent.rpcClientPeering.PeeringRead(ctx, &args) if err != nil { return nil, err } @@ -55,16 +59,19 @@ func (s *HTTPHandlers) peeringRead(resp http.ResponseWriter, req *http.Request, // PeeringList fetches all peerings in the datacenter in OSS or in a given partition in Consul Enterprise. func (s *HTTPHandlers) PeeringList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - args := pbpeering.PeeringListRequest{ - Datacenter: s.agent.config.Datacenter, - } var entMeta acl.EnterpriseMeta if err := s.parseEntMetaPartition(req, &entMeta); err != nil { return nil, err } - args.Partition = entMeta.PartitionOrEmpty() + args := pbpeering.PeeringListRequest{ + Partition: entMeta.PartitionOrEmpty(), + } - pbresp, err := s.agent.rpcClientPeering.PeeringList(req.Context(), &args) + var token string + s.parseToken(req, &token) + ctx := external.ContextWithToken(req.Context(), token) + + pbresp, err := s.agent.rpcClientPeering.PeeringList(ctx, &args) if err != nil { return nil, err } @@ -79,14 +86,12 @@ func (s *HTTPHandlers) PeeringGenerateToken(resp http.ResponseWriter, req *http. return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "The peering arguments must be provided in the body"} } - apiRequest := &api.PeeringGenerateTokenRequest{ - Datacenter: s.agent.config.Datacenter, - } - if err := lib.DecodeJSON(req.Body, apiRequest); err != nil { + var apiRequest api.PeeringGenerateTokenRequest + if err := lib.DecodeJSON(req.Body, &apiRequest); err != nil { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Body decoding failed: %v", err)} } - args := pbpeering.NewGenerateTokenRequestFromAPI(apiRequest) + args := pbpeering.NewGenerateTokenRequestFromAPI(&apiRequest) if args.PeerName == "" { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeerName is required in the payload when generating a new peering token."} } @@ -99,7 +104,11 @@ func (s *HTTPHandlers) PeeringGenerateToken(resp http.ResponseWriter, req *http. 
args.Partition = entMeta.PartitionOrEmpty() } - out, err := s.agent.rpcClientPeering.GenerateToken(req.Context(), args) + var token string + s.parseToken(req, &token) + ctx := external.ContextWithToken(req.Context(), token) + + out, err := s.agent.rpcClientPeering.GenerateToken(ctx, args) if err != nil { return nil, err } @@ -114,18 +123,15 @@ func (s *HTTPHandlers) PeeringEstablish(resp http.ResponseWriter, req *http.Requ return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "The peering arguments must be provided in the body"} } - apiRequest := &api.PeeringEstablishRequest{ - Datacenter: s.agent.config.Datacenter, - } - if err := lib.DecodeJSON(req.Body, apiRequest); err != nil { + var apiRequest api.PeeringEstablishRequest + if err := lib.DecodeJSON(req.Body, &apiRequest); err != nil { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Body decoding failed: %v", err)} } - args := pbpeering.NewEstablishRequestFromAPI(apiRequest) + args := pbpeering.NewEstablishRequestFromAPI(&apiRequest) if args.PeerName == "" { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeerName is required in the payload when establishing a peering."} } - if args.PeeringToken == "" { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "PeeringToken is required in the payload when establishing a peering."} } @@ -138,7 +144,11 @@ func (s *HTTPHandlers) PeeringEstablish(resp http.ResponseWriter, req *http.Requ args.Partition = entMeta.PartitionOrEmpty() } - out, err := s.agent.rpcClientPeering.Establish(req.Context(), args) + var token string + s.parseToken(req, &token) + ctx := external.ContextWithToken(req.Context(), token) + + out, err := s.agent.rpcClientPeering.Establish(ctx, args) if err != nil { return nil, err } @@ -149,17 +159,20 @@ func (s *HTTPHandlers) PeeringEstablish(resp http.ResponseWriter, req *http.Requ // peeringDelete initiates a deletion for a peering that matches the name and partition. // This assumes that the name and partition parameters are valid. func (s *HTTPHandlers) peeringDelete(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) { - args := pbpeering.PeeringDeleteRequest{ - Name: name, - Datacenter: s.agent.config.Datacenter, - } var entMeta acl.EnterpriseMeta if err := s.parseEntMetaPartition(req, &entMeta); err != nil { return nil, err } - args.Partition = entMeta.PartitionOrEmpty() + args := pbpeering.PeeringDeleteRequest{ + Name: name, + Partition: entMeta.PartitionOrEmpty(), + } - _, err := s.agent.rpcClientPeering.PeeringDelete(req.Context(), &args) + var token string + s.parseToken(req, &token) + ctx := external.ContextWithToken(req.Context(), token) + + _, err := s.agent.rpcClientPeering.PeeringDelete(ctx, &args) if err != nil { return nil, err } diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 04451c3d2..86badf67e 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -3,6 +3,7 @@ package proxycfgglue import ( "context" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/go-memdb" "github.com/hashicorp/consul/acl" @@ -14,7 +15,6 @@ import ( "github.com/hashicorp/consul/agent/consul/watch" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbpeering" ) // Store is the state store interface required for server-local data sources. 
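The peering HTTP handlers above all repeat one pattern, condensed here as a hypothetical handler: read the ACL token from the request, attach it to the outgoing context, and let the server-side gRPC endpoint perform the peering:read / peering:write enforcement. Only `examplePeeringHandler` is invented; the calls it makes are the ones used in the handlers above.

```go
package agent

import (
	"net/http"

	external "github.com/hashicorp/consul/agent/grpc-external"
	"github.com/hashicorp/consul/proto/pbpeering"
)

// examplePeeringHandler is an illustrative sketch, not part of the patch.
func (s *HTTPHandlers) examplePeeringHandler(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var token string
	s.parseToken(req, &token)
	ctx := external.ContextWithToken(req.Context(), token)

	out, err := s.agent.rpcClientPeering.PeeringList(ctx, &pbpeering.PeeringListRequest{})
	if err != nil {
		return nil, err
	}
	return out.Peerings, nil
}
```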
diff --git a/agent/proxycfg-glue/trust_bundle.go b/agent/proxycfg-glue/trust_bundle.go index 4ce42591b..455d7dc9f 100644 --- a/agent/proxycfg-glue/trust_bundle.go +++ b/agent/proxycfg-glue/trust_bundle.go @@ -19,7 +19,7 @@ import ( // CacheTrustBundle satisfies the proxycfg.TrustBundle interface by sourcing // data from the agent cache. func CacheTrustBundle(c *cache.Cache) proxycfg.TrustBundle { - return &cacheProxyDataSource[*pbpeering.TrustBundleReadRequest]{c, cachetype.TrustBundleReadName} + return &cacheProxyDataSource[*cachetype.TrustBundleReadRequest]{c, cachetype.TrustBundleReadName} } // ServerTrustBundle satisfies the proxycfg.TrustBundle interface by sourcing @@ -32,13 +32,13 @@ type serverTrustBundle struct { deps ServerDataSourceDeps } -func (s *serverTrustBundle) Notify(ctx context.Context, req *pbpeering.TrustBundleReadRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { +func (s *serverTrustBundle) Notify(ctx context.Context, req *cachetype.TrustBundleReadRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { // TODO(peering): ACL check. return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, func(ws memdb.WatchSet, store Store) (uint64, *pbpeering.TrustBundleReadResponse, error) { index, bundle, err := store.PeeringTrustBundleRead(ws, state.Query{ - Value: req.Name, - EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + Value: req.Request.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Request.Partition), }) if err != nil { return 0, nil, err @@ -55,7 +55,7 @@ func (s *serverTrustBundle) Notify(ctx context.Context, req *pbpeering.TrustBund // CacheTrustBundleList satisfies the proxycfg.TrustBundleList interface by sourcing // data from the agent cache. func CacheTrustBundleList(c *cache.Cache) proxycfg.TrustBundleList { - return &cacheProxyDataSource[*pbpeering.TrustBundleListByServiceRequest]{c, cachetype.TrustBundleListName} + return &cacheProxyDataSource[*cachetype.TrustBundleListRequest]{c, cachetype.TrustBundleListName} } // ServerTrustBundleList satisfies the proxycfg.TrustBundle interface by @@ -68,8 +68,8 @@ type serverTrustBundleList struct { deps ServerDataSourceDeps } -func (s *serverTrustBundleList) Notify(ctx context.Context, req *pbpeering.TrustBundleListByServiceRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { - entMeta := acl.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace) +func (s *serverTrustBundleList) Notify(ctx context.Context, req *cachetype.TrustBundleListRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { + entMeta := acl.NewEnterpriseMetaWithPartition(req.Request.Partition, req.Request.Namespace) // TODO(peering): ACL check. 
return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore, @@ -80,11 +80,11 @@ func (s *serverTrustBundleList) Notify(ctx context.Context, req *pbpeering.Trust err error ) switch { - case req.ServiceName != "": - index, bundles, err = store.TrustBundleListByService(ws, req.ServiceName, s.deps.Datacenter, entMeta) - case req.Kind == string(structs.ServiceKindMeshGateway): + case req.Request.Kind == string(structs.ServiceKindMeshGateway): index, bundles, err = store.PeeringTrustBundleList(ws, entMeta) - case req.Kind != "": + case req.Request.ServiceName != "": + index, bundles, err = store.TrustBundleListByService(ws, req.Request.ServiceName, s.deps.Datacenter, entMeta) + case req.Request.Kind != "": err = errors.New("kind must be mesh-gateway if set") default: err = errors.New("one of service or kind is required") diff --git a/agent/proxycfg-glue/trust_bundle_test.go b/agent/proxycfg-glue/trust_bundle_test.go index 65c343a05..910ffdcab 100644 --- a/agent/proxycfg-glue/trust_bundle_test.go +++ b/agent/proxycfg-glue/trust_bundle_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/consul/state" @@ -32,8 +33,10 @@ func TestServerTrustBundle(t *testing.T) { }) eventCh := make(chan proxycfg.UpdateEvent) - err := dataSource.Notify(context.Background(), &pbpeering.TrustBundleReadRequest{ - Name: peerName, + err := dataSource.Notify(context.Background(), &cachetype.TrustBundleReadRequest{ + Request: &pbpeering.TrustBundleReadRequest{ + Name: peerName, + }, }, "", eventCh) require.NoError(t, err) @@ -96,9 +99,11 @@ func TestServerTrustBundleList(t *testing.T) { }) eventCh := make(chan proxycfg.UpdateEvent) - err := dataSource.Notify(context.Background(), &pbpeering.TrustBundleListByServiceRequest{ - ServiceName: serviceName, - Partition: us, + err := dataSource.Notify(context.Background(), &cachetype.TrustBundleListRequest{ + Request: &pbpeering.TrustBundleListByServiceRequest{ + ServiceName: serviceName, + Partition: us, + }, }, "", eventCh) require.NoError(t, err) @@ -134,9 +139,11 @@ func TestServerTrustBundleList(t *testing.T) { }) eventCh := make(chan proxycfg.UpdateEvent) - err := dataSource.Notify(context.Background(), &pbpeering.TrustBundleListByServiceRequest{ - Kind: string(structs.ServiceKindMeshGateway), - Partition: "default", + err := dataSource.Notify(context.Background(), &cachetype.TrustBundleListRequest{ + Request: &pbpeering.TrustBundleListByServiceRequest{ + Kind: string(structs.ServiceKindMeshGateway), + Partition: "default", + }, }, "", eventCh) require.NoError(t, err) diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 9b0f3e54b..15e3498f2 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -46,11 +46,13 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e return snap, err } - err = s.dataSources.TrustBundleList.Notify(ctx, &pbpeering.TrustBundleListByServiceRequest{ - // TODO(peering): Pass ACL token - ServiceName: s.proxyCfg.DestinationServiceName, - Namespace: s.proxyID.NamespaceOrDefault(), - Partition: s.proxyID.PartitionOrDefault(), + err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{ + Request: &pbpeering.TrustBundleListByServiceRequest{ + ServiceName: s.proxyCfg.DestinationServiceName, + Namespace: s.proxyID.NamespaceOrDefault(), + Partition: s.proxyID.PartitionOrDefault(), + }, + QueryOptions: 
structs.QueryOptions{Token: s.token}, }, peeringTrustBundlesWatchID, s.ch) if err != nil { return snap, err @@ -226,9 +228,12 @@ func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, e // Check whether a watch for this peer exists to avoid duplicates. if ok := snap.ConnectProxy.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok { peerCtx, cancel := context.WithCancel(ctx) - if err := s.dataSources.TrustBundle.Notify(peerCtx, &pbpeering.TrustBundleReadRequest{ - Name: uid.Peer, - Partition: uid.PartitionOrDefault(), + if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{ + Request: &pbpeering.TrustBundleReadRequest{ + Name: uid.Peer, + Partition: uid.PartitionOrDefault(), + }, + QueryOptions: structs.QueryOptions{Token: s.token}, }, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil { cancel() return snap, fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err) @@ -344,9 +349,12 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s // Check whether a watch for this peer exists to avoid duplicates. if ok := snap.ConnectProxy.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok { peerCtx, cancel := context.WithCancel(ctx) - if err := s.dataSources.TrustBundle.Notify(peerCtx, &pbpeering.TrustBundleReadRequest{ - Name: uid.Peer, - Partition: uid.PartitionOrDefault(), + if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{ + Request: &pbpeering.TrustBundleReadRequest{ + Name: uid.Peer, + Partition: uid.PartitionOrDefault(), + }, + QueryOptions: structs.QueryOptions{Token: s.token}, }, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil { cancel() return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err) diff --git a/agent/proxycfg/data_sources.go b/agent/proxycfg/data_sources.go index 3bef5e347..bda0226ff 100644 --- a/agent/proxycfg/data_sources.go +++ b/agent/proxycfg/data_sources.go @@ -5,7 +5,6 @@ import ( cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbpeering" ) // UpdateEvent contains new data for a resource we are subscribed to (e.g. an @@ -220,13 +219,13 @@ type ServiceList interface { // TrustBundle is the interface used to consume updates about a single // peer's trust bundle. type TrustBundle interface { - Notify(ctx context.Context, req *pbpeering.TrustBundleReadRequest, correlationID string, ch chan<- UpdateEvent) error + Notify(ctx context.Context, req *cachetype.TrustBundleReadRequest, correlationID string, ch chan<- UpdateEvent) error } // TrustBundleList is the interface used to consume updates about trust bundles // for peered clusters that the given proxy is exported to. type TrustBundleList interface { - Notify(ctx context.Context, req *pbpeering.TrustBundleListByServiceRequest, correlationID string, ch chan<- UpdateEvent) error + Notify(ctx context.Context, req *cachetype.TrustBundleListRequest, correlationID string, ch chan<- UpdateEvent) error } // ExportedPeeredServices is the interface used to consume updates about the diff --git a/agent/proxycfg/mesh_gateway.go b/agent/proxycfg/mesh_gateway.go index b5c3f6755..f80ee537f 100644 --- a/agent/proxycfg/mesh_gateway.go +++ b/agent/proxycfg/mesh_gateway.go @@ -32,11 +32,14 @@ func (s *handlerMeshGateway) initialize(ctx context.Context) (ConfigSnapshot, er } // Watch for all peer trust bundles we may need. 
- err = s.dataSources.TrustBundleList.Notify(ctx, &pbpeering.TrustBundleListByServiceRequest{ - // TODO(peering): Pass ACL token - Kind: string(structs.ServiceKindMeshGateway), - Namespace: s.proxyID.NamespaceOrDefault(), - Partition: s.proxyID.PartitionOrDefault(), + err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{ + Request: &pbpeering.TrustBundleListByServiceRequest{ + Kind: string(structs.ServiceKindMeshGateway), + ServiceName: s.service, + Namespace: s.proxyID.NamespaceOrDefault(), + Partition: s.proxyID.PartitionOrDefault(), + }, + QueryOptions: structs.QueryOptions{Token: s.token}, }, peeringTrustBundlesWatchID, s.ch) if err != nil { return snap, err diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 662596b9b..855ded03d 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -137,8 +137,8 @@ func recordWatches(sc *stateConfig) *watchRecorder { PreparedQuery: typedWatchRecorder[*structs.PreparedQueryExecuteRequest]{wr}, ResolvedServiceConfig: typedWatchRecorder[*structs.ServiceConfigRequest]{wr}, ServiceList: typedWatchRecorder[*structs.DCSpecificRequest]{wr}, - TrustBundle: typedWatchRecorder[*pbpeering.TrustBundleReadRequest]{wr}, - TrustBundleList: typedWatchRecorder[*pbpeering.TrustBundleListByServiceRequest]{wr}, + TrustBundle: typedWatchRecorder[*cachetype.TrustBundleReadRequest]{wr}, + TrustBundleList: typedWatchRecorder[*cachetype.TrustBundleListRequest]{wr}, ExportedPeeredServices: typedWatchRecorder[*structs.DCSpecificRequest]{wr}, } recordWatchesEnterprise(sc, wr) @@ -203,9 +203,9 @@ func verifyDatacentersWatch(t testing.TB, request any) { func genVerifyTrustBundleReadWatch(peer string) verifyWatchRequest { return func(t testing.TB, request any) { - reqReal, ok := request.(*pbpeering.TrustBundleReadRequest) + reqReal, ok := request.(*cachetype.TrustBundleReadRequest) require.True(t, ok) - require.Equal(t, peer, reqReal.Name) + require.Equal(t, peer, reqReal.Request.Name) } } @@ -225,19 +225,19 @@ func genVerifyLeafWatch(expectedService string, expectedDatacenter string) verif func genVerifyTrustBundleListWatch(service string) verifyWatchRequest { return func(t testing.TB, request any) { - reqReal, ok := request.(*pbpeering.TrustBundleListByServiceRequest) + reqReal, ok := request.(*cachetype.TrustBundleListRequest) require.True(t, ok) - require.Equal(t, service, reqReal.ServiceName) + require.Equal(t, service, reqReal.Request.ServiceName) } } func genVerifyTrustBundleListWatchForMeshGateway(partition string) verifyWatchRequest { return func(t testing.TB, request any) { - reqReal, ok := request.(*pbpeering.TrustBundleListByServiceRequest) + reqReal, ok := request.(*cachetype.TrustBundleListRequest) require.True(t, ok) - require.Equal(t, string(structs.ServiceKindMeshGateway), reqReal.Kind) - require.True(t, acl.EqualPartitions(partition, reqReal.Partition), "%q != %q", partition, reqReal.Partition) - require.Empty(t, reqReal.ServiceName) + require.Equal(t, string(structs.ServiceKindMeshGateway), reqReal.Request.Kind) + require.True(t, acl.EqualPartitions(partition, reqReal.Request.Partition), "%q != %q", partition, reqReal.Request.Partition) + require.NotEmpty(t, reqReal.Request.ServiceName) } } diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index dfde519d1..0493e30da 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -751,8 +751,8 @@ func testConfigSnapshotFixture( PreparedQuery: &noopDataSource[*structs.PreparedQueryExecuteRequest]{}, 
ResolvedServiceConfig: &noopDataSource[*structs.ServiceConfigRequest]{}, ServiceList: &noopDataSource[*structs.DCSpecificRequest]{}, - TrustBundle: &noopDataSource[*pbpeering.TrustBundleReadRequest]{}, - TrustBundleList: &noopDataSource[*pbpeering.TrustBundleListByServiceRequest]{}, + TrustBundle: &noopDataSource[*cachetype.TrustBundleReadRequest]{}, + TrustBundleList: &noopDataSource[*cachetype.TrustBundleListRequest]{}, ExportedPeeredServices: &noopDataSource[*structs.DCSpecificRequest]{}, }, dnsConfig: DNSConfig{ // TODO: make configurable @@ -954,8 +954,8 @@ func NewTestDataSources() *TestDataSources { PreparedQuery: NewTestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse](), ResolvedServiceConfig: NewTestDataSource[*structs.ServiceConfigRequest, *structs.ServiceConfigResponse](), ServiceList: NewTestDataSource[*structs.DCSpecificRequest, *structs.IndexedServiceList](), - TrustBundle: NewTestDataSource[*pbpeering.TrustBundleReadRequest, *pbpeering.TrustBundleReadResponse](), - TrustBundleList: NewTestDataSource[*pbpeering.TrustBundleListByServiceRequest, *pbpeering.TrustBundleListByServiceResponse](), + TrustBundle: NewTestDataSource[*cachetype.TrustBundleReadRequest, *pbpeering.TrustBundleReadResponse](), + TrustBundleList: NewTestDataSource[*cachetype.TrustBundleListRequest, *pbpeering.TrustBundleListByServiceResponse](), } srcs.buildEnterpriseSources() return srcs @@ -981,8 +981,8 @@ type TestDataSources struct { PreparedQuery *TestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse] ResolvedServiceConfig *TestDataSource[*structs.ServiceConfigRequest, *structs.ServiceConfigResponse] ServiceList *TestDataSource[*structs.DCSpecificRequest, *structs.IndexedServiceList] - TrustBundle *TestDataSource[*pbpeering.TrustBundleReadRequest, *pbpeering.TrustBundleReadResponse] - TrustBundleList *TestDataSource[*pbpeering.TrustBundleListByServiceRequest, *pbpeering.TrustBundleListByServiceResponse] + TrustBundle *TestDataSource[*cachetype.TrustBundleReadRequest, *pbpeering.TrustBundleReadResponse] + TrustBundleList *TestDataSource[*cachetype.TrustBundleListRequest, *pbpeering.TrustBundleListByServiceResponse] TestDataSourcesEnterprise } diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 8cb78e33c..c7e0b861c 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -16,9 +16,11 @@ import ( "google.golang.org/protobuf/proto" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/dns" + external "github.com/hashicorp/consul/agent/grpc-external" "github.com/hashicorp/consul/agent/grpc-external/services/peerstream" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" @@ -43,6 +45,20 @@ func (e *errPeeringInvalidServerAddress) Error() string { return fmt.Sprintf("%s is not a valid peering server address", e.addr) } +// For private/internal gRPC handlers, protoc-gen-rpc-glue generates the +// requisite methods to satisfy the structs.RPCInfo interface using fields +// from the pbcommon package. This service is public, so we can't use those +// fields in our proto definition. Instead, we construct our RPCInfo manually. 
+var writeRequest struct { + structs.WriteRequest + structs.DCSpecificRequest +} + +var readRequest struct { + structs.QueryOptions + structs.DCSpecificRequest +} + // Server implements pbpeering.PeeringService to provide RPC operations for // managing peering relationships. type Server struct { @@ -90,6 +106,12 @@ func (s *Server) Register(grpcServer *grpc.Server) { // providing access to CA data and the RPC system for forwarding requests to // other servers. type Backend interface { + // ResolveTokenAndDefaultMeta returns an acl.Authorizer which authorizes + // actions based on the permissions granted to the token. + // If either entMeta or authzContext are non-nil they will be populated with the + // partition and namespace from the token. + ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) + // GetAgentCACertificates returns the CA certificate to be returned in the peering token data GetAgentCACertificates() ([]string, error) @@ -165,11 +187,11 @@ func (s *Server) GenerateToken( return nil, fmt.Errorf("meta tags failed validation: %w", err) } - // TODO(peering): add metrics - // TODO(peering): add tracing + defer metrics.MeasureSince([]string{"peering", "generate_token"}, time.Now()) resp := &pbpeering.GenerateTokenResponse{} - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&writeRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).GenerateToken(ctx, req) return err @@ -178,6 +200,17 @@ func (s *Server) GenerateToken( return resp, err } + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().PeeringWriteAllowed(&authzCtx); err != nil { + return nil, err + } + ca, err := s.Backend.GetAgentCACertificates() if err != nil { return nil, err @@ -194,7 +227,7 @@ func (s *Server) GenerateToken( } } - peeringOrNil, err := s.getExistingPeering(req.PeerName, req.Partition) + peeringOrNil, err := s.getExistingPeering(req.PeerName, entMeta.PartitionOrDefault()) if err != nil { return nil, err } @@ -206,7 +239,7 @@ func (s *Server) GenerateToken( canRetry := true RETRY_ONCE: - id, err := s.getExistingOrCreateNewPeerID(req.PeerName, req.Partition) + id, err := s.getExistingOrCreateNewPeerID(req.PeerName, entMeta.PartitionOrDefault()) if err != nil { return nil, err } @@ -214,9 +247,10 @@ RETRY_ONCE: Peering: &pbpeering.Peering{ ID: id, Name: req.PeerName, - // TODO(peering): Normalize from ACL token once this endpoint is guarded by ACLs. - Partition: req.PartitionOrDefault(), - Meta: req.Meta, + Meta: req.Meta, + + // PartitionOrEmpty is used to avoid writing "default" in OSS. 
+ Partition: entMeta.PartitionOrEmpty(), }, } if err := s.Backend.PeeringWrite(&writeReq); err != nil { @@ -234,7 +268,7 @@ RETRY_ONCE: q := state.Query{ Value: strings.ToLower(req.PeerName), - EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + EnterpriseMeta: *entMeta, } _, peering, err := s.Backend.Store().PeeringRead(nil, q) if err != nil { @@ -288,7 +322,8 @@ func (s *Server) Establish( } resp := &pbpeering.EstablishResponse{} - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&writeRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).Establish(ctx, req) return err @@ -299,7 +334,18 @@ func (s *Server) Establish( defer metrics.MeasureSince([]string{"peering", "establish"}, time.Now()) - peeringOrNil, err := s.getExistingPeering(req.PeerName, req.Partition) + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().PeeringWriteAllowed(&authzCtx); err != nil { + return nil, err + } + + peeringOrNil, err := s.getExistingPeering(req.PeerName, entMeta.PartitionOrDefault()) if err != nil { return nil, err } @@ -341,6 +387,9 @@ func (s *Server) Establish( PeerID: tok.PeerID, Meta: req.Meta, State: pbpeering.PeeringState_ESTABLISHING, + + // PartitionOrEmpty is used to avoid writing "default" in OSS. + Partition: entMeta.PartitionOrEmpty(), }, } if err = s.Backend.PeeringWrite(writeReq); err != nil { @@ -350,6 +399,7 @@ func (s *Server) Establish( return resp, nil } +// OPTIMIZE: Handle blocking queries func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequest) (*pbpeering.PeeringReadResponse, error) { if !s.Config.PeeringEnabled { return nil, peeringNotEnabledErr @@ -360,7 +410,8 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ } var resp *pbpeering.PeeringReadResponse - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&readRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringRead(ctx, req) return err @@ -370,12 +421,22 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ } defer metrics.MeasureSince([]string{"peering", "read"}, time.Now()) - // TODO(peering): ACL check request token - // TODO(peering): handle blocking queries + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().PeeringReadAllowed(&authzCtx); err != nil { + return nil, err + } + q := state.Query{ Value: strings.ToLower(req.Name), - EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition)} + EnterpriseMeta: *entMeta, + } _, peering, err := s.Backend.Store().PeeringRead(nil, q) if err != nil { return nil, err @@ -388,6 +449,7 @@ func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequ return &pbpeering.PeeringReadResponse{Peering: cp}, nil } +// OPTIMIZE: Handle blocking queries func (s *Server) 
PeeringList(ctx context.Context, req *pbpeering.PeeringListRequest) (*pbpeering.PeeringListResponse, error) { if !s.Config.PeeringEnabled { return nil, peeringNotEnabledErr @@ -398,7 +460,8 @@ func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequ } var resp *pbpeering.PeeringListResponse - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&readRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringList(ctx, req) return err @@ -407,11 +470,20 @@ func (s *Server) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequ return resp, err } - defer metrics.MeasureSince([]string{"peering", "list"}, time.Now()) - // TODO(peering): ACL check request token + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } - // TODO(peering): handle blocking queries - _, peerings, err := s.Backend.Store().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(req.Partition)) + if err := authz.ToAllowAuthorizer().PeeringReadAllowed(&authzCtx); err != nil { + return nil, err + } + + defer metrics.MeasureSince([]string{"peering", "list"}, time.Now()) + + _, peerings, err := s.Backend.Store().PeeringList(nil, *entMeta) if err != nil { return nil, err } @@ -465,7 +537,8 @@ func (s *Server) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRe } var resp *pbpeering.PeeringWriteResponse - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&writeRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringWrite(ctx, req) return err @@ -475,19 +548,28 @@ func (s *Server) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRe } defer metrics.MeasureSince([]string{"peering", "write"}, time.Now()) - // TODO(peering): ACL check request token + + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Peering.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().PeeringWriteAllowed(&authzCtx); err != nil { + return nil, err + } if req.Peering == nil { return nil, fmt.Errorf("missing required peering body") } - id, err := s.getExistingOrCreateNewPeerID(req.Peering.Name, req.Peering.Partition) + id, err := s.getExistingOrCreateNewPeerID(req.Peering.Name, entMeta.PartitionOrDefault()) if err != nil { return nil, err } req.Peering.ID = id - // TODO(peering): handle blocking queries err = s.Backend.PeeringWrite(req) if err != nil { return nil, err @@ -505,7 +587,8 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete } var resp *pbpeering.PeeringDeleteResponse - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&writeRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringDelete(ctx, req) return err @@ -515,13 +598,21 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete } defer 
metrics.MeasureSince([]string{"peering", "delete"}, time.Now()) - // TODO(peering): ACL check request token - // TODO(peering): handle blocking queries + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().PeeringWriteAllowed(&authzCtx); err != nil { + return nil, err + } q := state.Query{ Value: strings.ToLower(req.Name), - EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + EnterpriseMeta: *entMeta, } _, existing, err := s.Backend.Store().PeeringRead(nil, q) if err != nil { @@ -543,9 +634,11 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete // for deletion the peering is effectively gone. ID: existing.ID, Name: req.Name, - Partition: req.Partition, State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now().UTC()), + + // PartitionOrEmpty is used to avoid writing "default" in OSS. + Partition: entMeta.PartitionOrEmpty(), }, } err = s.Backend.PeeringWrite(writeReq) @@ -555,6 +648,7 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete return &pbpeering.PeeringDeleteResponse{}, nil } +// OPTIMIZE: Handle blocking queries func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundleReadRequest) (*pbpeering.TrustBundleReadResponse, error) { if !s.Config.PeeringEnabled { return nil, peeringNotEnabledErr @@ -565,7 +659,8 @@ func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundle } var resp *pbpeering.TrustBundleReadResponse - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&readRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).TrustBundleRead(ctx, req) return err @@ -575,13 +670,21 @@ func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundle } defer metrics.MeasureSince([]string{"peering", "trust_bundle_read"}, time.Now()) - // TODO(peering): ACL check request token - // TODO(peering): handle blocking queries + var authzCtx acl.AuthorizerContext + entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzCtx); err != nil { + return nil, err + } idx, trustBundle, err := s.Backend.Store().PeeringTrustBundleRead(nil, state.Query{ Value: req.Name, - EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + EnterpriseMeta: *entMeta, }) if err != nil { return nil, fmt.Errorf("failed to read trust bundle for peer %s: %w", req.Name, err) @@ -594,6 +697,7 @@ func (s *Server) TrustBundleRead(ctx context.Context, req *pbpeering.TrustBundle } // TODO(peering): rename rpc & request/response to drop the "service" part +// OPTIMIZE: Handle blocking queries func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.TrustBundleListByServiceRequest) (*pbpeering.TrustBundleListByServiceResponse, error) { if !s.Config.PeeringEnabled { return nil, peeringNotEnabledErr @@ -605,9 +709,13 @@ func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.Tr if err := 
s.Backend.EnterpriseCheckNamespaces(req.Namespace); err != nil { return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) } + if req.ServiceName == "" { + return nil, errors.New("missing service name") + } var resp *pbpeering.TrustBundleListByServiceResponse - handled, err := s.ForwardRPC(req, func(conn *grpc.ClientConn) error { + handled, err := s.ForwardRPC(&readRequest, func(conn *grpc.ClientConn) error { + ctx := external.ForwardMetadataContext(ctx) var err error resp, err = pbpeering.NewPeeringServiceClient(conn).TrustBundleListByService(ctx, req) return err @@ -617,11 +725,17 @@ func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.Tr } defer metrics.MeasureSince([]string{"peering", "trust_bundle_list_by_service"}, time.Now()) - // TODO(peering): ACL check request token for service:write on the service name - - // TODO(peering): handle blocking queries + var authzCtx acl.AuthorizerContext entMeta := acl.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace) + authz, err := s.Backend.ResolveTokenAndDefaultMeta(external.TokenFromContext(ctx), &entMeta, &authzCtx) + if err != nil { + return nil, err + } + + if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(req.ServiceName, &authzCtx); err != nil { + return nil, err + } var ( idx uint64 @@ -629,10 +743,10 @@ func (s *Server) TrustBundleListByService(ctx context.Context, req *pbpeering.Tr ) switch { - case req.ServiceName != "": - idx, bundles, err = s.Backend.Store().TrustBundleListByService(nil, req.ServiceName, s.Datacenter, entMeta) case req.Kind == string(structs.ServiceKindMeshGateway): idx, bundles, err = s.Backend.Store().PeeringTrustBundleList(nil, entMeta) + case req.ServiceName != "": + idx, bundles, err = s.Backend.Store().TrustBundleListByService(nil, req.ServiceName, s.Datacenter, entMeta) case req.Kind != "": return nil, grpcstatus.Error(codes.InvalidArgument, "kind must be mesh-gateway if set") default: diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 47de64554..ca3553a1b 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" + external "github.com/hashicorp/consul/agent/grpc-external" grpc "github.com/hashicorp/consul/agent/grpc-internal" "github.com/hashicorp/consul/agent/grpc-internal/resolver" "github.com/hashicorp/consul/agent/pool" @@ -41,6 +42,13 @@ import ( "github.com/hashicorp/consul/types" ) +const ( + testTokenPeeringReadSecret = "9a83c138-a0c7-40f1-89fa-6acf9acd78f5" + testTokenPeeringWriteSecret = "91f90a41-0840-4afe-b615-68745f9e16c1" + testTokenServiceReadSecret = "1ef8e3cf-6e95-49aa-9f73-a0d3ad1a77d4" + testTokenServiceWriteSecret = "4a3dc05d-d86c-4f20-be43-8f4f8f045fea" +) + func generateTooManyMetaKeys() map[string]string { // todo -- modularize in structs.go or testing.go tooMuchMeta := make(map[string]string) @@ -70,12 +78,12 @@ func TestPeeringService_GenerateToken(t *testing.T) { // TODO(peering): for more failure cases, consider using a table test // check meta tags - reqE := pbpeering.GenerateTokenRequest{PeerName: "peerB", Datacenter: "dc1", Meta: generateTooManyMetaKeys()} + reqE := pbpeering.GenerateTokenRequest{PeerName: "peerB", Meta: generateTooManyMetaKeys()} _, errE := client.GenerateToken(ctx, &reqE) require.EqualError(t, errE, "rpc error: code = Unknown desc = meta tags failed validation: Node metadata cannot 
contain more than 64 key/value pairs") // happy path - req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Datacenter: "dc1", Meta: map[string]string{"foo": "bar"}} + req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Meta: map[string]string{"foo": "bar"}} resp, err := client.GenerateToken(ctx, &req) require.NoError(t, err) @@ -129,7 +137,7 @@ func TestPeeringService_GenerateTokenExternalAddress(t *testing.T) { externalAddress := "32.1.2.3:8502" // happy path - req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Datacenter: "dc1", Meta: map[string]string{"foo": "bar"}, ServerExternalAddresses: []string{externalAddress}} + req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Meta: map[string]string{"foo": "bar"}, ServerExternalAddresses: []string{externalAddress}} resp, err := client.GenerateToken(ctx, &req) require.NoError(t, err) @@ -144,6 +152,62 @@ func TestPeeringService_GenerateTokenExternalAddress(t *testing.T) { require.Equal(t, []string{ca}, token.CA) } +func TestPeeringService_GenerateToken_ACLEnforcement(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = acl.PolicyDeny + }) + upsertTestACLs(t, s.Server.FSM().State()) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.GenerateTokenRequest + token string + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + _, err := client.GenerateToken(external.ContextWithToken(ctx, tc.token), tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + req: &pbpeering.GenerateTokenRequest{PeerName: "foo"}, + expectErr: "lacks permission 'peering:write'", + }, + { + name: "read token lacks permissions", + req: &pbpeering.GenerateTokenRequest{ + PeerName: "foo", + }, + token: testTokenPeeringReadSecret, + expectErr: "lacks permission 'peering:write'", + }, + { + name: "write token grants permission", + req: &pbpeering.GenerateTokenRequest{ + PeerName: "foo", + }, + token: testTokenPeeringWriteSecret, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + func TestPeeringService_Establish(t *testing.T) { validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") validTokenJSON, _ := json.Marshal(&validToken) @@ -250,6 +314,71 @@ func TestPeeringService_Establish(t *testing.T) { } } +func TestPeeringService_Establish_ACLEnforcement(t *testing.T) { + validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") + validTokenJSON, _ := json.Marshal(&validToken) + validTokenB64 := base64.StdEncoding.EncodeToString(validTokenJSON) + + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = acl.PolicyDeny + }) + upsertTestACLs(t, s.Server.FSM().State()) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.EstablishRequest + token string + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + 
t.Cleanup(cancel) + + _, err := client.Establish(external.ContextWithToken(ctx, tc.token), tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + req: &pbpeering.EstablishRequest{ + PeerName: "foo", + PeeringToken: validTokenB64, + }, + expectErr: "lacks permission 'peering:write'", + }, + { + name: "read token lacks permissions", + req: &pbpeering.EstablishRequest{ + PeerName: "foo", + PeeringToken: validTokenB64, + }, + token: testTokenPeeringReadSecret, + expectErr: "lacks permission 'peering:write'", + }, + { + name: "write token grants permission", + req: &pbpeering.EstablishRequest{ + PeerName: "foo", + PeeringToken: validTokenB64, + }, + token: testTokenPeeringWriteSecret, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + func TestPeeringService_Read(t *testing.T) { // TODO(peering): see note on newTestServer, refactor to not use this s := newTestServer(t, nil) @@ -309,6 +438,72 @@ func TestPeeringService_Read(t *testing.T) { } } +func TestPeeringService_Read_ACLEnforcement(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = acl.PolicyDeny + }) + upsertTestACLs(t, s.Server.FSM().State()) + + // insert peering directly to state store + p := &pbpeering.Peering{ + ID: testUUID(t), + Name: "foo", + State: pbpeering.PeeringState_ESTABLISHING, + PeerCAPems: nil, + PeerServerName: "test", + PeerServerAddresses: []string{"addr1"}, + ImportedServiceCount: 0, + ExportedServiceCount: 0, + } + err := s.Server.FSM().State().PeeringWrite(10, p) + require.NoError(t, err) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.PeeringReadRequest + expect *pbpeering.PeeringReadResponse + token string + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.PeeringRead(external.ContextWithToken(ctx, tc.token), tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + prototest.AssertDeepEqual(t, tc.expect, resp) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + req: &pbpeering.PeeringReadRequest{Name: "foo"}, + expect: &pbpeering.PeeringReadResponse{Peering: p}, + expectErr: "lacks permission 'peering:read'", + }, + { + name: "read token grants permission", + req: &pbpeering.PeeringReadRequest{ + Name: "foo", + }, + expect: &pbpeering.PeeringReadResponse{Peering: p}, + token: testTokenPeeringReadSecret, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + func TestPeeringService_Delete(t *testing.T) { // TODO(peering): see note on newTestServer, refactor to not use this s := newTestServer(t, nil) @@ -344,6 +539,76 @@ func TestPeeringService_Delete(t *testing.T) { }) } +func TestPeeringService_Delete_ACLEnforcement(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = acl.PolicyDeny + }) + upsertTestACLs(t, s.Server.FSM().State()) + + p := &pbpeering.Peering{ + ID: 
testUUID(t), + Name: "foo", + State: pbpeering.PeeringState_ESTABLISHING, + PeerCAPems: nil, + PeerServerName: "test", + PeerServerAddresses: []string{"addr1"}, + } + err := s.Server.FSM().State().PeeringWrite(10, p) + require.NoError(t, err) + require.Nil(t, p.DeletedAt) + require.True(t, p.IsActive()) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.PeeringDeleteRequest + token string + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + _, err = client.PeeringDelete(external.ContextWithToken(ctx, tc.token), tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + req: &pbpeering.PeeringDeleteRequest{Name: "foo"}, + expectErr: "lacks permission 'peering:write'", + }, + { + name: "read token lacks permissions", + req: &pbpeering.PeeringDeleteRequest{ + Name: "foo", + }, + token: testTokenPeeringReadSecret, + expectErr: "lacks permission 'peering:write'", + }, + { + name: "write token grants permission", + req: &pbpeering.PeeringDeleteRequest{ + Name: "foo", + }, + token: testTokenPeeringWriteSecret, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } + +} + func TestPeeringService_List(t *testing.T) { // TODO(peering): see note on newTestServer, refactor to not use this s := newTestServer(t, nil) @@ -388,6 +653,78 @@ func TestPeeringService_List(t *testing.T) { prototest.AssertDeepEqual(t, expect, resp) } +func TestPeeringService_List_ACLEnforcement(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = acl.PolicyDeny + }) + upsertTestACLs(t, s.Server.FSM().State()) + + // insert peering directly to state store + foo := &pbpeering.Peering{ + ID: testUUID(t), + Name: "foo", + State: pbpeering.PeeringState_ESTABLISHING, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + ImportedServiceCount: 0, + ExportedServiceCount: 0, + } + require.NoError(t, s.Server.FSM().State().PeeringWrite(10, foo)) + bar := &pbpeering.Peering{ + ID: testUUID(t), + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + ImportedServiceCount: 0, + ExportedServiceCount: 0, + } + require.NoError(t, s.Server.FSM().State().PeeringWrite(15, bar)) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + token string + expect *pbpeering.PeeringListResponse + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.PeeringList(external.ContextWithToken(ctx, tc.token), &pbpeering.PeeringListRequest{}) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + prototest.AssertDeepEqual(t, tc.expect, resp) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + expectErr: "lacks permission 'peering:read'", + }, + { + name: "read token grants permission", + token: testTokenPeeringReadSecret, + expect: 
&pbpeering.PeeringListResponse{ + Peerings: []*pbpeering.Peering{bar, foo}, + }, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + func TestPeeringService_TrustBundleRead(t *testing.T) { srv := newTestServer(t, nil) store := srv.Server.FSM().State() @@ -396,25 +733,6 @@ func TestPeeringService_TrustBundleRead(t *testing.T) { var lastIdx uint64 = 1 _ = setupTestPeering(t, store, "my-peering", lastIdx) - mysql := &structs.CheckServiceNode{ - Node: &structs.Node{ - Node: "node1", - Address: "10.0.0.1", - PeerName: "my-peering", - }, - Service: &structs.NodeService{ - ID: "mysql-1", - Service: "mysql", - Port: 5000, - PeerName: "my-peering", - }, - } - - lastIdx++ - require.NoError(t, store.EnsureNode(lastIdx, mysql.Node)) - lastIdx++ - require.NoError(t, store.EnsureService(lastIdx, mysql.Node.Node, mysql.Service)) - bundle := &pbpeering.PeeringTrustBundle{ TrustDomain: "peer1.com", PeerName: "my-peering", @@ -435,6 +753,76 @@ func TestPeeringService_TrustBundleRead(t *testing.T) { prototest.AssertDeepEqual(t, bundle, resp.Bundle) } +func TestPeeringService_TrustBundleRead_ACLEnforcement(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = acl.PolicyDeny + }) + store := s.Server.FSM().State() + upsertTestACLs(t, s.Server.FSM().State()) + + // Insert peering and trust bundle directly to state store. + _ = setupTestPeering(t, store, "my-peering", 10) + + bundle := &pbpeering.PeeringTrustBundle{ + TrustDomain: "peer1.com", + PeerName: "my-peering", + RootPEMs: []string{"peer1-root-1"}, + } + require.NoError(t, store.PeeringTrustBundleWrite(11, bundle)) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.TrustBundleReadRequest + token string + expect *pbpeering.PeeringTrustBundle + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.TrustBundleRead(external.ContextWithToken(ctx, tc.token), tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + prototest.AssertDeepEqual(t, tc.expect, resp.Bundle) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + req: &pbpeering.TrustBundleReadRequest{Name: "foo"}, + expectErr: "lacks permission 'service:write'", + }, + { + name: "service read token lacks permissions", + req: &pbpeering.TrustBundleReadRequest{ + Name: "my-peering", + }, + token: testTokenServiceReadSecret, + expectErr: "lacks permission 'service:write'", + }, + { + name: "with service write token", + req: &pbpeering.TrustBundleReadRequest{ + Name: "my-peering", + }, + token: testTokenServiceWriteSecret, + expect: bundle, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + // Setup: // - Peerings "foo" and "bar" with trust bundles saved // - "api" service exported to both "foo" and "bar" @@ -667,6 +1055,116 @@ func TestPeeringService_PeeringDisabled(t *testing.T) { }) } +func TestPeeringService_TrustBundleListByService_ACLEnforcement(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(conf *consul.Config) { + conf.ACLsEnabled = true + conf.ACLResolverSettings.ACLDefaultPolicy = 
acl.PolicyDeny + }) + store := s.Server.FSM().State() + upsertTestACLs(t, s.Server.FSM().State()) + + var lastIdx uint64 = 10 + + lastIdx++ + require.NoError(t, s.Server.FSM().State().PeeringWrite(lastIdx, &pbpeering.Peering{ + ID: testUUID(t), + Name: "foo", + State: pbpeering.PeeringState_ESTABLISHING, + PeerServerName: "test", + PeerServerAddresses: []string{"addr1"}, + })) + + lastIdx++ + require.NoError(t, store.PeeringTrustBundleWrite(lastIdx, &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo.com", + PeerName: "foo", + RootPEMs: []string{"foo-root-1"}, + })) + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, &structs.Node{ + Node: "my-node", Address: "127.0.0.1", + })) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "my-node", &structs.NodeService{ + ID: "api", + Service: "api", + Port: 8000, + })) + + entry := structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "api", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "foo", + }, + }, + }, + }, + } + require.NoError(t, entry.Normalize()) + require.NoError(t, entry.Validate()) + + lastIdx++ + require.NoError(t, store.EnsureConfigEntry(lastIdx, &entry)) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.TrustBundleListByServiceRequest + token string + expect []string + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.TrustBundleListByService(external.ContextWithToken(ctx, tc.token), tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + require.Len(t, resp.Bundles, 1) + require.Equal(t, tc.expect, resp.Bundles[0].RootPEMs) + } + tcs := []testcase{ + { + name: "anonymous token lacks permissions", + req: &pbpeering.TrustBundleListByServiceRequest{ServiceName: "api"}, + expectErr: "lacks permission 'service:write'", + }, + { + name: "service read token lacks permission", + req: &pbpeering.TrustBundleListByServiceRequest{ + ServiceName: "api", + }, + token: testTokenServiceReadSecret, + expectErr: "lacks permission 'service:write'", + }, + { + name: "with service write token", + req: &pbpeering.TrustBundleListByServiceRequest{ + ServiceName: "api", + }, + token: testTokenServiceWriteSecret, + expect: []string{"foo-root-1"}, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + // newTestServer is copied from partition/service_test.go, with the addition of certs/cas. // TODO(peering): these are endpoint tests and should live in the agent/consul // package. 
Instead, these can be written around a mock client (see testing.go) @@ -831,6 +1329,87 @@ func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps { } } +func upsertTestACLs(t *testing.T, store *state.Store) { + var ( + testPolicyPeeringReadID = "43fed171-ad1d-4d3b-9df3-c99c1c835c37" + testPolicyPeeringWriteID = "cddb0821-e720-4411-bbdd-cc62ce417eac" + + testPolicyServiceReadID = "0e054136-f5d3-4627-a7e6-198f1df923d3" + testPolicyServiceWriteID = "b55e03f4-c9dd-4210-8d24-f7ea8e2a1918" + ) + policies := structs.ACLPolicies{ + { + ID: testPolicyPeeringReadID, + Name: "peering-read", + Rules: `peering = "read"`, + Syntax: acl.SyntaxCurrent, + }, + { + ID: testPolicyPeeringWriteID, + Name: "peering-write", + Rules: `peering = "write"`, + Syntax: acl.SyntaxCurrent, + }, + { + ID: testPolicyServiceReadID, + Name: "service-read", + Rules: `service "api" { policy = "read" }`, + Syntax: acl.SyntaxCurrent, + }, + { + ID: testPolicyServiceWriteID, + Name: "service-write", + Rules: `service "api" { policy = "write" }`, + Syntax: acl.SyntaxCurrent, + }, + } + require.NoError(t, store.ACLPolicyBatchSet(100, policies)) + + tokens := structs.ACLTokens{ + &structs.ACLToken{ + AccessorID: "22500c91-723c-4335-be8a-6697417dc35b", + SecretID: testTokenPeeringReadSecret, + Description: "peering read", + Policies: []structs.ACLTokenPolicyLink{ + { + ID: testPolicyPeeringReadID, + }, + }, + }, + &structs.ACLToken{ + AccessorID: "de924f93-cfec-404c-9a7e-c1c9b96b8cae", + SecretID: testTokenPeeringWriteSecret, + Description: "peering write", + Policies: []structs.ACLTokenPolicyLink{ + { + ID: testPolicyPeeringWriteID, + }, + }, + }, + &structs.ACLToken{ + AccessorID: "53c54f79-ffed-47d4-904e-e2e0e40c0a01", + SecretID: testTokenServiceReadSecret, + Description: "service read", + Policies: []structs.ACLTokenPolicyLink{ + { + ID: testPolicyServiceReadID, + }, + }, + }, + &structs.ACLToken{ + AccessorID: "a100fa5f-db72-49f0-8f61-aa1f9f92f657", + SecretID: testTokenServiceWriteSecret, + Description: "service write", + Policies: []structs.ACLTokenPolicyLink{ + { + ID: testPolicyServiceWriteID, + }, + }, + }, + } + require.NoError(t, store.ACLTokenBatchSet(101, tokens, state.ACLTokenSetOptions{})) +} + func setupTestPeering(t *testing.T, store *state.Store, name string, index uint64) string { t.Helper() err := store.PeeringWrite(index, &pbpeering.Peering{ diff --git a/agent/rpc/peering/testing.go b/agent/rpc/peering/testing.go index de64dda7a..04f1bb223 100644 --- a/agent/rpc/peering/testing.go +++ b/agent/rpc/peering/testing.go @@ -1,6 +1,7 @@ package peering import ( + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbpeering" ) @@ -53,6 +54,7 @@ func TestPeering(peerName string, state pbpeering.PeeringState, meta map[string] State: state, PeerID: validPeerID, Meta: meta, + Partition: acl.DefaultPartitionName, } } diff --git a/api/peering.go b/api/peering.go index bd24d9849..7a98ba936 100644 --- a/api/peering.go +++ b/api/peering.go @@ -80,10 +80,8 @@ type PeeringGenerateTokenRequest struct { // PeerName is the name of the remote peer. PeerName string // Partition to be peered. - Partition string `json:",omitempty"` - Datacenter string `json:",omitempty"` - Token string `json:",omitempty"` - // Meta is a mapping of some string value to any other string value. 
+ Partition string `json:",omitempty"` + // Meta is a mapping of some string value to any other string value Meta map[string]string `json:",omitempty"` // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify // load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server @@ -103,9 +101,7 @@ type PeeringEstablishRequest struct { // The peering token returned from the peer's GenerateToken endpoint. PeeringToken string `json:",omitempty"` // Partition to be peered. - Partition string `json:",omitempty"` - Datacenter string `json:",omitempty"` - Token string `json:",omitempty"` + Partition string `json:",omitempty"` // Meta is a mapping of some string value to any other string value Meta map[string]string `json:",omitempty"` } diff --git a/api/peering_test.go b/api/peering_test.go index 8a42a454b..79c8983ce 100644 --- a/api/peering_test.go +++ b/api/peering_test.go @@ -37,6 +37,97 @@ func peerExistsInPeerListings(peer *Peering, peerings []*Peering) bool { return false } +func TestAPI_Peering_ACLDeny(t *testing.T) { + c, s := makeACLClient(t) + defer s.Stop() + + peerings := c.Peerings() + + testutil.RunStep(t, "generate token", func(t *testing.T) { + req := PeeringGenerateTokenRequest{PeerName: "peer1"} + + testutil.RunStep(t, "without ACL token", func(t *testing.T) { + _, _, err := peerings.GenerateToken(context.Background(), req, &WriteOptions{Token: "anonymous"}) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Permission denied") + }) + + testutil.RunStep(t, "with ACL token", func(t *testing.T) { + resp, wm, err := peerings.GenerateToken(context.Background(), req, &WriteOptions{Token: "root"}) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotNil(t, resp) + }) + }) + + testutil.RunStep(t, "establish peering", func(t *testing.T) { + tokenJSON := `{"ServerAddresses":["127.0.0.1:8502"],"ServerName":"foo","PeerID":"716af65f-b844-f3bb-8aef-cfd7949f6873"}` + tokenB64 := base64.StdEncoding.EncodeToString([]byte(tokenJSON)) + + req := PeeringEstablishRequest{ + PeerName: "peer2", + PeeringToken: tokenB64, + } + testutil.RunStep(t, "without ACL token", func(t *testing.T) { + _, _, err := peerings.Establish(context.Background(), req, &WriteOptions{Token: "anonymous"}) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Permission denied") + }) + + testutil.RunStep(t, "with ACL token", func(t *testing.T) { + resp, wm, err := peerings.Establish(context.Background(), req, &WriteOptions{Token: "root"}) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotNil(t, resp) + }) + }) + + testutil.RunStep(t, "read peering", func(t *testing.T) { + testutil.RunStep(t, "without ACL token", func(t *testing.T) { + _, _, err := peerings.Read(context.Background(), "peer1", &QueryOptions{Token: "anonymous"}) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Permission denied") + }) + + testutil.RunStep(t, "with ACL token", func(t *testing.T) { + resp, qm, err := peerings.Read(context.Background(), "peer1", &QueryOptions{Token: "root"}) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotNil(t, resp) + }) + }) + + testutil.RunStep(t, "list peerings", func(t *testing.T) { + testutil.RunStep(t, "without ACL token", func(t *testing.T) { + _, _, err := peerings.List(context.Background(), &QueryOptions{Token: "anonymous"}) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Permission denied") + }) + + testutil.RunStep(t, "with ACL 
token", func(t *testing.T) { + resp, qm, err := peerings.List(context.Background(), &QueryOptions{Token: "root"}) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotNil(t, resp) + require.Len(t, resp, 2) + }) + }) + + testutil.RunStep(t, "delete peering", func(t *testing.T) { + testutil.RunStep(t, "without ACL token", func(t *testing.T) { + _, err := peerings.Delete(context.Background(), "peer1", &WriteOptions{Token: "anonymous"}) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Permission denied") + }) + + testutil.RunStep(t, "with ACL token", func(t *testing.T) { + wm, err := peerings.Delete(context.Background(), "peer1", &WriteOptions{Token: "root"}) + require.NoError(t, err) + require.NotNil(t, wm) + }) + }) +} + func TestAPI_Peering_Read_ErrorHandling(t *testing.T) { t.Parallel() @@ -115,25 +206,6 @@ func TestAPI_Peering_List(t *testing.T) { }) } -func TestAPI_Peering_GenerateToken(t *testing.T) { - t.Parallel() - - c, s := makeClientWithCA(t) - defer s.Stop() - s.WaitForSerfCheck(t) - - ctx, cancel := context.WithTimeout(context.Background(), DefaultCtxDuration) - defer cancel() - - peerings := c.Peerings() - - t.Run("cannot have GenerateToken forward DC requests", func(t *testing.T) { - // Try to generate a token in dc2 - _, _, err := peerings.GenerateToken(ctx, PeeringGenerateTokenRequest{PeerName: "peer2", Datacenter: "dc2"}, nil) - require.Error(t, err) - }) -} - func TestAPI_Peering_GenerateToken_ExternalAddresses(t *testing.T) { t.Parallel() @@ -163,9 +235,6 @@ func TestAPI_Peering_GenerateToken_ExternalAddresses(t *testing.T) { require.Contains(t, string(tokenJSON), externalAddress) } -// TODO(peering): cover the following test cases: bad/ malformed input, peering with wrong token, -// peering with the wrong PeerName - // TestAPI_Peering_GenerateToken_Read_Establish_Delete tests the following use case: // a server creates a peering token, reads the token, then another server calls establish peering // finally, we delete the token on the first server @@ -217,7 +286,6 @@ func TestAPI_Peering_GenerateToken_Read_Establish_Delete(t *testing.T) { testutil.RunStep(t, "establish peering", func(t *testing.T) { i := PeeringEstablishRequest{ - Datacenter: c2.config.Datacenter, PeerName: "peer1", PeeringToken: token1, Meta: map[string]string{"foo": "bar"}, diff --git a/proto/pbpeering/peering.gen.go b/proto/pbpeering/peering.gen.go index b7afc6a4c..5707e3b6c 100644 --- a/proto/pbpeering/peering.gen.go +++ b/proto/pbpeering/peering.gen.go @@ -11,8 +11,6 @@ func EstablishRequestToAPI(s *EstablishRequest, t *api.PeeringEstablishRequest) t.PeerName = s.PeerName t.PeeringToken = s.PeeringToken t.Partition = s.Partition - t.Datacenter = s.Datacenter - t.Token = s.Token t.Meta = s.Meta } func EstablishRequestFromAPI(t *api.PeeringEstablishRequest, s *EstablishRequest) { @@ -22,8 +20,6 @@ func EstablishRequestFromAPI(t *api.PeeringEstablishRequest, s *EstablishRequest s.PeerName = t.PeerName s.PeeringToken = t.PeeringToken s.Partition = t.Partition - s.Datacenter = t.Datacenter - s.Token = t.Token s.Meta = t.Meta } func EstablishResponseToAPI(s *EstablishResponse, t *api.PeeringEstablishResponse) { @@ -42,8 +38,6 @@ func GenerateTokenRequestToAPI(s *GenerateTokenRequest, t *api.PeeringGenerateTo } t.PeerName = s.PeerName t.Partition = s.Partition - t.Datacenter = s.Datacenter - t.Token = s.Token t.Meta = s.Meta t.ServerExternalAddresses = s.ServerExternalAddresses } @@ -53,8 +47,6 @@ func GenerateTokenRequestFromAPI(t *api.PeeringGenerateTokenRequest, s *Generate } 
 	s.PeerName = t.PeerName
 	s.Partition = t.Partition
-	s.Datacenter = t.Datacenter
-	s.Token = t.Token
 	s.Meta = t.Meta
 	s.ServerExternalAddresses = t.ServerExternalAddresses
 }
diff --git a/proto/pbpeering/peering.go b/proto/pbpeering/peering.go
index 5de1dc9bc..f5092ded2 100644
--- a/proto/pbpeering/peering.go
+++ b/proto/pbpeering/peering.go
@@ -1,86 +1,68 @@
 package pbpeering
 
 import (
-	"strconv"
 	"time"
 
 	"github.com/golang/protobuf/ptypes/timestamp"
-	"github.com/mitchellh/hashstructure"
-
-	"github.com/hashicorp/consul/agent/cache"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
 )
 
-// TODO(peering): These are byproducts of not embedding
-// types in our protobuf definitions and are temporary;
-// Hoping to replace them with 1 or 2 methods per request
-// using https://github.com/hashicorp/consul/pull/12507
-
 // RequestDatacenter implements structs.RPCInfo
 func (req *GenerateTokenRequest) RequestDatacenter() string {
-	return req.Datacenter
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// IsRead implements structs.RPCInfo
-func (req *GenerateTokenRequest) IsRead() bool {
-	return false
+// RequestDatacenter implements structs.RPCInfo
+func (req *EstablishRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// AllowStaleRead implements structs.RPCInfo
-func (req *GenerateTokenRequest) AllowStaleRead() bool {
-	return false
+// RequestDatacenter implements structs.RPCInfo
+func (req *PeeringReadRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// TokenSecret implements structs.RPCInfo
-func (req *GenerateTokenRequest) TokenSecret() string {
-	return req.Token
+// RequestDatacenter implements structs.RPCInfo
+func (req *PeeringListRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// SetTokenSecret implements structs.RPCInfo
-func (req *GenerateTokenRequest) SetTokenSecret(token string) {
-	req.Token = token
+// RequestDatacenter implements structs.RPCInfo
+func (req *PeeringWriteRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// HasTimedOut implements structs.RPCInfo
-func (req *GenerateTokenRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) {
-	return time.Since(start) > rpcHoldTimeout, nil
+// RequestDatacenter implements structs.RPCInfo
+func (req *PeeringDeleteRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// Timeout implements structs.RPCInfo
-func (msg *GenerateTokenRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration {
-	return rpcHoldTimeout
+// RequestDatacenter implements structs.RPCInfo
+func (req *TrustBundleReadRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
-// IsRead implements structs.RPCInfo
-func (req *EstablishRequest) IsRead() bool {
-	return false
-}
-
-// AllowStaleRead implements structs.RPCInfo
-func (req *EstablishRequest) AllowStaleRead() bool {
-	return false
-}
-
-// TokenSecret implements structs.RPCInfo
-func (req *EstablishRequest) TokenSecret() string {
-	return req.Token
-}
-
-// SetTokenSecret implements structs.RPCInfo
-func (req *EstablishRequest) SetTokenSecret(token string) {
-	req.Token = token
-}
-
-// HasTimedOut implements structs.RPCInfo
-func (req *EstablishRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) {
-	return time.Since(start) > rpcHoldTimeout, nil
-}
-
-// Timeout implements structs.RPCInfo
-func (msg *EstablishRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration {
-	return rpcHoldTimeout
+// RequestDatacenter implements structs.RPCInfo
+func (req *TrustBundleListByServiceRequest) RequestDatacenter() string {
+	// Cross-datacenter requests are not allowed for peering actions because
+	// they rely on WAN-federation.
+	return ""
 }
 
 // ShouldDial returns true when the peering was stored via the peering initiation endpoint,
@@ -95,34 +77,6 @@ func (x PeeringState) GoString() string {
 	return x.String()
 }
 
-func (r *TrustBundleReadRequest) CacheInfo() cache.RequestInfo {
-	info := cache.RequestInfo{
-		// TODO(peering): Revisit whether this is the token to use once request types accept a token.
-		Token:          r.Token(),
-		Datacenter:     r.Datacenter,
-		MinIndex:       0,
-		Timeout:        0,
-		MustRevalidate: false,
-
-		// TODO(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works.
-		// Using an exponential backoff when the result hasn't changed may be preferable.
-		MaxAge: 1 * time.Second,
-	}
-
-	v, err := hashstructure.Hash([]interface{}{
-		r.Partition,
-		r.Name,
-	}, nil)
-	if err == nil {
-		// If there is an error, we don't set the key. A blank key forces
-		// no cache for this request so the request is forwarded directly
-		// to the server.
-		info.Key = strconv.FormatUint(v, 10)
-	}
-
-	return info
-}
-
 // ConcatenatedRootPEMs concatenates and returns all PEM-encoded public certificates
 // in a peer's trust bundle.
 func (b *PeeringTrustBundle) ConcatenatedRootPEMs() string {
@@ -242,35 +196,6 @@ func NewEstablishRequestFromAPI(req *api.PeeringEstablishRequest) *EstablishRequ
 	return t
 }
 
-func (r *TrustBundleListByServiceRequest) CacheInfo() cache.RequestInfo {
-	info := cache.RequestInfo{
-		// TODO(peering): Revisit whether this is the token to use once request types accept a token.
-		Token:          r.Token(),
-		Datacenter:     r.Datacenter,
-		MinIndex:       0,
-		Timeout:        0,
-		MustRevalidate: false,
-
-		// TODO(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works.
-		// Using an exponential backoff when the result hasn't changed may be preferable.
-		MaxAge: 1 * time.Second,
-	}
-
-	v, err := hashstructure.Hash([]interface{}{
-		r.Partition,
-		r.Namespace,
-		r.ServiceName,
-	}, nil)
-	if err == nil {
-		// If there is an error, we don't set the key. A blank key forces
-		// no cache for this request so the request is forwarded directly
-		// to the server.
-		info.Key = strconv.FormatUint(v, 10)
-	}
-
-	return info
-}
-
 func TimePtrFromProto(s *timestamp.Timestamp) *time.Time {
 	if s == nil {
 		return nil
diff --git a/proto/pbpeering/peering.pb.go b/proto/pbpeering/peering.pb.go
index a5e0f2cb5..7c2a8b478 100644
--- a/proto/pbpeering/peering.pb.go
+++ b/proto/pbpeering/peering.pb.go
@@ -140,9 +140,11 @@ type Peering struct {
 	// ExportedServiceCount is the count of how many services are exported to this peering.
 	ExportedServiceCount uint64 `protobuf:"varint,14,opt,name=ExportedServiceCount,proto3" json:"ExportedServiceCount,omitempty"`
 	// CreateIndex is the Raft index at which the Peering was created.
-	CreateIndex uint64 `protobuf:"varint,11,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"`
+	// @gotags: bexpr:"-"
+	CreateIndex uint64 `protobuf:"varint,11,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"`
 	// ModifyIndex is the latest Raft index at which the Peering. was modified.
-	ModifyIndex uint64 `protobuf:"varint,12,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"`
+	// @gotags: bexpr:"-"
+	ModifyIndex uint64 `protobuf:"varint,12,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"`
 }
 
 func (x *Peering) Reset() {
@@ -293,9 +295,11 @@ type PeeringTrustBundle struct {
 	// which sent this trust bundle. Used for generating SpiffeIDs.
 	ExportedPartition string `protobuf:"bytes,5,opt,name=ExportedPartition,proto3" json:"ExportedPartition,omitempty"`
 	// CreateIndex is the Raft index at which the trust domain was created.
-	CreateIndex uint64 `protobuf:"varint,6,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"`
+	// @gotags: bexpr:"-"
+	CreateIndex uint64 `protobuf:"varint,6,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty" bexpr:"-"`
 	// ModifyIndex is the latest Raft index at which the trust bundle was modified.
- ModifyIndex uint64 `protobuf:"varint,7,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"` + // @gotags: bexpr:"-" + ModifyIndex uint64 `protobuf:"varint,7,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty" bexpr:"-"` } func (x *PeeringTrustBundle) Reset() { @@ -379,15 +383,14 @@ func (x *PeeringTrustBundle) GetModifyIndex() uint64 { return 0 } -// @consul-rpc-glue: Datacenter,LeaderReadTODO +// @consul-rpc-glue: LeaderReadTODO type PeeringReadRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` - Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` } func (x *PeeringReadRequest) Reset() { @@ -436,13 +439,6 @@ func (x *PeeringReadRequest) GetPartition() string { return "" } -func (x *PeeringReadRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type PeeringReadResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -490,14 +486,13 @@ func (x *PeeringReadResponse) GetPeering() *Peering { return nil } -// @consul-rpc-glue: Datacenter,LeaderReadTODO +// @consul-rpc-glue: LeaderReadTODO type PeeringListRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Partition string `protobuf:"bytes,1,opt,name=Partition,proto3" json:"Partition,omitempty"` - Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Partition string `protobuf:"bytes,1,opt,name=Partition,proto3" json:"Partition,omitempty"` } func (x *PeeringListRequest) Reset() { @@ -539,13 +534,6 @@ func (x *PeeringListRequest) GetPartition() string { return "" } -func (x *PeeringListRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type PeeringListResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -593,17 +581,14 @@ func (x *PeeringListResponse) GetPeerings() []*Peering { return nil } -// @consul-rpc-glue: Datacenter,WriteTODO type PeeringWriteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Peering *Peering `protobuf:"bytes,1,opt,name=Peering,proto3" json:"Peering,omitempty"` - //TODO(peering): what to do with embedded write request? 
- Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` // Meta is a mapping of some string value to any other string value - Meta map[string]string `protobuf:"bytes,3,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Meta map[string]string `protobuf:"bytes,2,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *PeeringWriteRequest) Reset() { @@ -645,13 +630,6 @@ func (x *PeeringWriteRequest) GetPeering() *Peering { return nil } -func (x *PeeringWriteRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - func (x *PeeringWriteRequest) GetMeta() map[string]string { if x != nil { return x.Meta @@ -698,7 +676,6 @@ func (*PeeringWriteResponse) Descriptor() ([]byte, []int) { return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{7} } -// @consul-rpc-glue: Datacenter,WriteTODO type PeeringDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -706,8 +683,6 @@ type PeeringDeleteRequest struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` - //TODO(peering): what to do with embedded write request? - Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } func (x *PeeringDeleteRequest) Reset() { @@ -756,13 +731,6 @@ func (x *PeeringDeleteRequest) GetPartition() string { return "" } -func (x *PeeringDeleteRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type PeeringDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -801,7 +769,6 @@ func (*PeeringDeleteResponse) Descriptor() ([]byte, []int) { return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{9} } -// @consul-rpc-glue: Datacenter,ReadTODO type TrustBundleListByServiceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -811,9 +778,6 @@ type TrustBundleListByServiceRequest struct { Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"` Kind string `protobuf:"bytes,4,opt,name=Kind,proto3" json:"Kind,omitempty"` - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - Datacenter string `protobuf:"bytes,5,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } func (x *TrustBundleListByServiceRequest) Reset() { @@ -876,13 +840,6 @@ func (x *TrustBundleListByServiceRequest) GetKind() string { return "" } -func (x *TrustBundleListByServiceRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type TrustBundleListByServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -938,7 +895,6 @@ func (x *TrustBundleListByServiceResponse) GetBundles() []*PeeringTrustBundle { return nil } -// @consul-rpc-glue: Datacenter,ReadTODO type TrustBundleReadRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -946,9 +902,6 @@ type TrustBundleReadRequest struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` - // 
these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } func (x *TrustBundleReadRequest) Reset() { @@ -997,13 +950,6 @@ func (x *TrustBundleReadRequest) GetPartition() string { return "" } -func (x *TrustBundleReadRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type TrustBundleReadResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1059,6 +1005,7 @@ func (x *TrustBundleReadResponse) GetBundle() *PeeringTrustBundle { return nil } +// This is a purely internal type and does not require query metadata. type PeeringTerminateByIDRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1144,15 +1091,12 @@ func (*PeeringTerminateByIDResponse) Descriptor() ([]byte, []int) { return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{15} } -// @consul-rpc-glue: Datacenter type PeeringTrustBundleWriteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields PeeringTrustBundle *PeeringTrustBundle `protobuf:"bytes,1,opt,name=PeeringTrustBundle,proto3" json:"PeeringTrustBundle,omitempty"` - //TODO(peering): what to do with embedded write request? - Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } func (x *PeeringTrustBundleWriteRequest) Reset() { @@ -1194,13 +1138,6 @@ func (x *PeeringTrustBundleWriteRequest) GetPeeringTrustBundle() *PeeringTrustBu return nil } -func (x *PeeringTrustBundleWriteRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type PeeringTrustBundleWriteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1239,7 +1176,6 @@ func (*PeeringTrustBundleWriteResponse) Descriptor() ([]byte, []int) { return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{17} } -// @consul-rpc-glue: Datacenter type PeeringTrustBundleDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1247,8 +1183,6 @@ type PeeringTrustBundleDeleteRequest struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` - //TODO(peering): what to do with embedded write request? - Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` } func (x *PeeringTrustBundleDeleteRequest) Reset() { @@ -1297,13 +1231,6 @@ func (x *PeeringTrustBundleDeleteRequest) GetPartition() string { return "" } -func (x *PeeringTrustBundleDeleteRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - type PeeringTrustBundleDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1356,10 +1283,6 @@ type GenerateTokenRequest struct { PeerName string `protobuf:"bytes,1,opt,name=PeerName,proto3" json:"PeerName,omitempty"` // Partition is the local partition being peered. 
Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - Token string `protobuf:"bytes,4,opt,name=Token,proto3" json:"Token,omitempty"` // Meta is a mapping of some string value to any other string value Meta map[string]string `protobuf:"bytes,5,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify @@ -1414,20 +1337,6 @@ func (x *GenerateTokenRequest) GetPartition() string { return "" } -func (x *GenerateTokenRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - -func (x *GenerateTokenRequest) GetToken() string { - if x != nil { - return x.Token - } - return "" -} - func (x *GenerateTokenRequest) GetMeta() map[string]string { if x != nil { return x.Meta @@ -1496,8 +1405,6 @@ func (x *GenerateTokenResponse) GetPeeringToken() string { return "" } -// @consul-rpc-glue: Datacenter -// // mog annotation: // // target=github.com/hashicorp/consul/api.PeeringEstablishRequest @@ -1514,12 +1421,8 @@ type EstablishRequest struct { PeeringToken string `protobuf:"bytes,2,opt,name=PeeringToken,proto3" json:"PeeringToken,omitempty"` // Partition is the local partition being peered. Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"` - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - Datacenter string `protobuf:"bytes,4,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` - Token string `protobuf:"bytes,5,opt,name=Token,proto3" json:"Token,omitempty"` // Meta is a mapping of some string value to any other string value - Meta map[string]string `protobuf:"bytes,6,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Meta map[string]string `protobuf:"bytes,4,rep,name=Meta,proto3" json:"Meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *EstablishRequest) Reset() { @@ -1575,20 +1478,6 @@ func (x *EstablishRequest) GetPartition() string { return "" } -func (x *EstablishRequest) GetDatacenter() string { - if x != nil { - return x.Datacenter - } - return "" -} - -func (x *EstablishRequest) GetToken() string { - if x != nil { - return x.Token - } - return "" -} - func (x *EstablishRequest) GetMeta() map[string]string { if x != nil { return x.Meta @@ -1596,7 +1485,6 @@ func (x *EstablishRequest) GetMeta() map[string]string { return nil } -// // mog annotation: // // target=github.com/hashicorp/consul/api.PeeringEstablishResponse @@ -1706,39 +1594,33 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x22, 0x66, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, + 0x6e, 0x64, 0x65, 0x78, 0x22, 0x46, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 
0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, - 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x13, + 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, - 0x52, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x52, 0x0a, 0x12, 0x50, 0x65, 0x65, + 0x52, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x32, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, - 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x5d, 0x0a, + 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x8a, 0x02, 0x0a, + 0x6e, 0x67, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x52, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, - 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x04, 0x4d, 0x65, - 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x67, 0x52, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 
0x54, 0x0a, 0x04, 0x4d, 0x65, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, @@ -1748,15 +1630,13 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x68, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, + 0x65, 0x22, 0x48, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, - 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x50, + 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x17, 0x0a, 0x15, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x1f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x53, @@ -1765,9 +1645,7 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, - 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x89, 0x01, 0x0a, 0x20, 0x54, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x22, 0x89, 0x01, 0x0a, 0x20, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, @@ -1776,14 +1654,12 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 
0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x07, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x22, 0x6a, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, - 0x65, 0x72, 0x22, 0x7e, 0x0a, 0x17, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x17, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x4d, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x02, 0x20, @@ -1796,7 +1672,7 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, + 0x65, 0x22, 0x87, 0x01, 0x0a, 0x1e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x65, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, @@ -1804,159 +1680,149 @@ var file_proto_pbpeering_peering_proto_rawDesc = []byte{ 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, - 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, - 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x21, 0x0a, 0x1f, 0x50, + 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, - 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x73, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x53, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, - 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, + 0x69, 0x6f, 0x6e, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x02, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, - 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x55, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, - 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x04, 0x4d, 0x65, + 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x38, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, + 0x0c, 0x50, 0x65, 0x65, 0x72, 
0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xfc, 0x01, 0x0a, 0x10, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x02, 0x0a, 0x10, 0x45, 0x73, 0x74, 0x61, - 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, - 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, - 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x51, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 
0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, - 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, - 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x13, 0x0a, 0x11, - 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2a, 0x73, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, - 0x0c, 0x45, 0x53, 0x54, 0x41, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, - 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x46, - 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4c, 0x45, - 0x54, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x06, 0x32, 0xc0, 0x08, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, - 0x0a, 0x09, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x33, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x34, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 
0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, - 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, - 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x13, 0x0a, 0x11, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x73, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x53, 0x54, 0x41, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x49, 0x4e, + 0x47, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, + 0x0b, 
0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x45, + 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10, 0x06, 0x32, 0xc0, 0x08, 0x0a, 0x0e, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x82, 0x01, + 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa3, 0x01, 0x0a, 0x18, 0x54, 0x72, 0x75, - 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, - 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x76, 0x0a, 0x09, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, + 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 
0x74, 0x12, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x0c, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x36, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa3, 0x01, 0x0a, + 0x18, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x79, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, - 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, - 0x61, 0x64, 0x12, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, - 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x8a, 0x02, 0x0a, 0x25, 0x63, 0x6f, - 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, - 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x21, 0x48, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xe2, - 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, + 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4c, 0x69, 0x73, + 0x74, 0x42, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x12, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x8a, 0x02, + 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x50, 0xaa, 0x02, 0x21, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0xca, + 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 
0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x50, 0x65, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x50, - 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x67, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, + 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, + 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, + 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x3a, 0x3a, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/proto/pbpeering/peering.proto b/proto/pbpeering/peering.proto index fc29d2458..2f7c9858f 100644 --- a/proto/pbpeering/peering.proto +++ b/proto/pbpeering/peering.proto @@ -105,9 +105,11 @@ message Peering { uint64 ExportedServiceCount = 14; // CreateIndex is the Raft index at which the Peering was created. + // @gotags: bexpr:"-" uint64 CreateIndex = 11; // ModifyIndex is the latest Raft index at which the Peering. was modified. + // @gotags: bexpr:"-" uint64 ModifyIndex = 12; } @@ -130,79 +132,56 @@ message PeeringTrustBundle { string ExportedPartition = 5; // CreateIndex is the Raft index at which the trust domain was created. + // @gotags: bexpr:"-" uint64 CreateIndex = 6; // ModifyIndex is the latest Raft index at which the trust bundle was modified. + // @gotags: bexpr:"-" uint64 ModifyIndex = 7; } -// @consul-rpc-glue: Datacenter,LeaderReadTODO +// @consul-rpc-glue: LeaderReadTODO message PeeringReadRequest { string Name = 1; string Partition = 2; - - string Datacenter = 3; - -//TODO(peering) query metadata } message PeeringReadResponse { Peering Peering = 1; - -//TODO(peering) query metadata } -// @consul-rpc-glue: Datacenter,LeaderReadTODO +// @consul-rpc-glue: LeaderReadTODO message PeeringListRequest { string Partition = 1; - - string Datacenter = 2; - -//TODO(peering) query metadata } message PeeringListResponse { repeated Peering Peerings = 1; - -//TODO(peering) query metadata } -// @consul-rpc-glue: Datacenter,WriteTODO message PeeringWriteRequest { Peering Peering = 1; - //TODO(peering): what to do with embedded write request? - string Datacenter = 2; - // Meta is a mapping of some string value to any other string value - map Meta = 3; + map Meta = 2; } // TODO(peering): Consider returning Peering if we keep this endpoint around message PeeringWriteResponse {} -// @consul-rpc-glue: Datacenter,WriteTODO message PeeringDeleteRequest { string Name = 1; string Partition = 2; - - //TODO(peering): what to do with embedded write request? 
- string Datacenter = 3; } message PeeringDeleteResponse {} -// @consul-rpc-glue: Datacenter,ReadTODO message TrustBundleListByServiceRequest { string ServiceName = 1; string Namespace = 2; string Partition = 3; string Kind = 4; - - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - string Datacenter = 5; } message TrustBundleListByServiceResponse { @@ -210,14 +189,9 @@ message TrustBundleListByServiceResponse { repeated PeeringTrustBundle Bundles = 2; } -// @consul-rpc-glue: Datacenter,ReadTODO message TrustBundleReadRequest { string Name = 1; string Partition = 2; - - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - string Datacenter = 3; } message TrustBundleReadResponse { @@ -225,30 +199,23 @@ message TrustBundleReadResponse { PeeringTrustBundle Bundle = 2; } +// This is a purely internal type and does not require query metadata. message PeeringTerminateByIDRequest { string ID = 1; } message PeeringTerminateByIDResponse {} -// @consul-rpc-glue: Datacenter message PeeringTrustBundleWriteRequest { PeeringTrustBundle PeeringTrustBundle = 1; - - //TODO(peering): what to do with embedded write request? - string Datacenter = 2; } message PeeringTrustBundleWriteResponse {} -// @consul-rpc-glue: Datacenter message PeeringTrustBundleDeleteRequest { string Name = 1; string Partition = 2; - - //TODO(peering): what to do with embedded write request? - string Datacenter = 3; } message PeeringTrustBundleDeleteResponse {} @@ -265,11 +232,6 @@ message GenerateTokenRequest { // Partition is the local partition being peered. string Partition = 2; - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - string Datacenter = 3; - string Token = 4; - // Meta is a mapping of some string value to any other string value map Meta = 5; @@ -290,8 +252,6 @@ message GenerateTokenResponse { string PeeringToken = 1; } -// @consul-rpc-glue: Datacenter -// // mog annotation: // // target=github.com/hashicorp/consul/api.PeeringEstablishRequest @@ -307,16 +267,10 @@ message EstablishRequest { // Partition is the local partition being peered. 
string Partition = 3; - // these are common fields required for implementing structs.RPCInfo methods - // that are used to forward requests - string Datacenter = 4; - string Token = 5; - // Meta is a mapping of some string value to any other string value - map Meta = 6; + map Meta = 4; } -// // mog annotation: // // target=github.com/hashicorp/consul/api.PeeringEstablishResponse diff --git a/proto/pbpeering/peering.rpcglue.pb.go b/proto/pbpeering/peering.rpcglue.pb.go index 4c3c4dff0..ca7498e0b 100644 --- a/proto/pbpeering/peering.rpcglue.pb.go +++ b/proto/pbpeering/peering.rpcglue.pb.go @@ -12,14 +12,6 @@ import ( var _ structs.RPCInfo var _ time.Month -// RequestDatacenter implements structs.RPCInfo -func (msg *PeeringReadRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - // IsRead implements structs.RPCInfo func (msg *PeeringReadRequest) IsRead() bool { // TODO(peering): figure out read semantics here @@ -64,14 +56,6 @@ func (msg *PeeringReadRequest) Token() string { return "" } -// RequestDatacenter implements structs.RPCInfo -func (msg *PeeringListRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - // IsRead implements structs.RPCInfo func (msg *PeeringListRequest) IsRead() bool { // TODO(peering): figure out read semantics here @@ -115,211 +99,3 @@ func (msg *PeeringListRequest) Token() string { // TODO(peering): figure out read semantics here return "" } - -// RequestDatacenter implements structs.RPCInfo -func (msg *PeeringWriteRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - -// IsRead implements structs.RPCInfo -func (msg *PeeringWriteRequest) IsRead() bool { - // TODO(peering): figure out write semantics here - return false -} - -// AllowStaleRead implements structs.RPCInfo -func (msg *PeeringWriteRequest) AllowStaleRead() bool { - // TODO(peering): figure out write semantics here - return false -} - -// HasTimedOut implements structs.RPCInfo -func (msg *PeeringWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { - // TODO(peering): figure out write semantics here - return time.Since(start) > rpcHoldTimeout, nil -} - -// Timeout implements structs.RPCInfo -func (msg *PeeringWriteRequest) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { - // TODO(peering): figure out write semantics here - return rpcHoldTimeout -} - -// SetTokenSecret implements structs.RPCInfo -func (msg *PeeringWriteRequest) SetTokenSecret(s string) { - // TODO(peering): figure out write semantics here -} - -// TokenSecret implements structs.RPCInfo -func (msg *PeeringWriteRequest) TokenSecret() string { - // TODO(peering): figure out write semantics here - return "" -} - -// RequestDatacenter implements structs.RPCInfo -func (msg *PeeringDeleteRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - -// IsRead implements structs.RPCInfo -func (msg *PeeringDeleteRequest) IsRead() bool { - // TODO(peering): figure out write semantics here - return false -} - -// AllowStaleRead implements structs.RPCInfo -func (msg *PeeringDeleteRequest) AllowStaleRead() bool { - // TODO(peering): figure out write semantics here - return false -} - -// HasTimedOut implements structs.RPCInfo -func (msg *PeeringDeleteRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { - 
// TODO(peering): figure out write semantics here - return time.Since(start) > rpcHoldTimeout, nil -} - -// Timeout implements structs.RPCInfo -func (msg *PeeringDeleteRequest) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { - // TODO(peering): figure out write semantics here - return rpcHoldTimeout -} - -// SetTokenSecret implements structs.RPCInfo -func (msg *PeeringDeleteRequest) SetTokenSecret(s string) { - // TODO(peering): figure out write semantics here -} - -// TokenSecret implements structs.RPCInfo -func (msg *PeeringDeleteRequest) TokenSecret() string { - // TODO(peering): figure out write semantics here - return "" -} - -// RequestDatacenter implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - -// IsRead implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) IsRead() bool { - // TODO(peering): figure out read semantics here - return true -} - -// AllowStaleRead implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) AllowStaleRead() bool { - // TODO(peering): figure out read semantics here - return false -} - -// HasTimedOut implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { - // TODO(peering): figure out read semantics here - return time.Since(start) > rpcHoldTimeout, nil -} - -// Timeout implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { - // TODO(peering): figure out read semantics here - return rpcHoldTimeout -} - -// SetTokenSecret implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) SetTokenSecret(s string) { - // TODO(peering): figure out read semantics here -} - -// TokenSecret implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) TokenSecret() string { - // TODO(peering): figure out read semantics here - return "" -} - -// Token implements structs.RPCInfo -func (msg *TrustBundleListByServiceRequest) Token() string { - // TODO(peering): figure out read semantics here - return "" -} - -// RequestDatacenter implements structs.RPCInfo -func (msg *TrustBundleReadRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - -// IsRead implements structs.RPCInfo -func (msg *TrustBundleReadRequest) IsRead() bool { - // TODO(peering): figure out read semantics here - return true -} - -// AllowStaleRead implements structs.RPCInfo -func (msg *TrustBundleReadRequest) AllowStaleRead() bool { - // TODO(peering): figure out read semantics here - return false -} - -// HasTimedOut implements structs.RPCInfo -func (msg *TrustBundleReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) (bool, error) { - // TODO(peering): figure out read semantics here - return time.Since(start) > rpcHoldTimeout, nil -} - -// Timeout implements structs.RPCInfo -func (msg *TrustBundleReadRequest) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { - // TODO(peering): figure out read semantics here - return rpcHoldTimeout -} - -// SetTokenSecret implements structs.RPCInfo -func (msg *TrustBundleReadRequest) SetTokenSecret(s string) { - // TODO(peering): figure out read semantics here -} - -// TokenSecret implements structs.RPCInfo 
-func (msg *TrustBundleReadRequest) TokenSecret() string { - // TODO(peering): figure out read semantics here - return "" -} - -// Token implements structs.RPCInfo -func (msg *TrustBundleReadRequest) Token() string { - // TODO(peering): figure out read semantics here - return "" -} - -// RequestDatacenter implements structs.RPCInfo -func (msg *PeeringTrustBundleWriteRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - -// RequestDatacenter implements structs.RPCInfo -func (msg *PeeringTrustBundleDeleteRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} - -// RequestDatacenter implements structs.RPCInfo -func (msg *EstablishRequest) RequestDatacenter() string { - if msg == nil { - return "" - } - return msg.Datacenter -} From 6a47c4475571e32ef384320f3b015e66d4115816 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 25 Jul 2022 11:50:11 -0400 Subject: [PATCH 074/107] Enable/Disable Peering Support in the UI (#13816) We enabled/disable based on the config flag. --- agent/uiserver/ui_template_data.go | 1 + agent/uiserver/uiserver_test.go | 37 ++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/agent/uiserver/ui_template_data.go b/agent/uiserver/ui_template_data.go index 97dcbf51d..e28fc97fe 100644 --- a/agent/uiserver/ui_template_data.go +++ b/agent/uiserver/ui_template_data.go @@ -35,6 +35,7 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{} "UIConfig": uiCfg, "LocalDatacenter": cfg.Datacenter, "PrimaryDatacenter": cfg.PrimaryDatacenter, + "PeeringEnabled": cfg.PeeringEnabled, } // Also inject additional provider scripts if needed, otherwise strip the diff --git a/agent/uiserver/uiserver_test.go b/agent/uiserver/uiserver_test.go index 97835f2b0..47110da5a 100644 --- a/agent/uiserver/uiserver_test.go +++ b/agent/uiserver/uiserver_test.go @@ -43,6 +43,7 @@ func TestUIServerIndex(t *testing.T) { "LocalDatacenter": "dc1", "PrimaryDatacenter": "dc1", "ContentPath": "/ui/", + "PeeringEnabled": true, "UIConfig": { "hcp_enabled": false, "metrics_provider": "", @@ -78,6 +79,7 @@ func TestUIServerIndex(t *testing.T) { "LocalDatacenter": "dc1", "PrimaryDatacenter": "dc1", "ContentPath": "/ui/", + "PeeringEnabled": true, "UIConfig": { "hcp_enabled": false, "metrics_provider": "foo", @@ -101,6 +103,7 @@ func TestUIServerIndex(t *testing.T) { "LocalDatacenter": "dc1", "PrimaryDatacenter": "dc1", "ContentPath": "/ui/", + "PeeringEnabled": true, "UIConfig": { "hcp_enabled": false, "metrics_provider": "", @@ -121,6 +124,7 @@ func TestUIServerIndex(t *testing.T) { "LocalDatacenter": "dc1", "PrimaryDatacenter": "dc1", "ContentPath": "/ui/", + "PeeringEnabled": true, "UIConfig": { "hcp_enabled": true, "metrics_provider": "", @@ -129,6 +133,29 @@ func TestUIServerIndex(t *testing.T) { } }`, }, + { + name: "peering disabled", + cfg: basicUIEnabledConfig( + withPeeringDisabled(), + ), + path: "/", + wantStatus: http.StatusOK, + wantContains: []string{" + +[HTTP Method]: /api-docs#http-methods +[URL Path]: /api-docs#version-prefix +[Response Type]: /api-docs#formatted-json-output +[Required ACLs]: /docs/security/acl/acl-system +[Blocking Queries]: /api-docs/features/blocking +[Consistency Modes]: /api-docs/features/consistency +[Agent Caching]: /api-docs/features/caching From 13c91ddbdccd7072d14bb905f7aea470c4eb5547 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 12 Jan 2022 08:47:13 -0800 Subject: [PATCH 087/107] docs: add partition command 
characteristics Characteristics include: - Required ACL permissions - Corresponding HTTP API endpoint - (Lack of) support for blocking queries and agent caching --- website/content/commands/partition.mdx | 59 +++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/website/content/commands/partition.mdx b/website/content/commands/partition.mdx index ef9142097..c6e1ed019 100644 --- a/website/content/commands/partition.mdx +++ b/website/content/commands/partition.mdx @@ -5,6 +5,8 @@ description: | The partition command enables you create and manage Consul Enterprise admin partitions. --- +@include 'http_api_and_cli_characteristics_links.mdx' + # Consul Admin Partition Command: `consul partition` @@ -66,6 +68,16 @@ You can issue the following subcommands with the `consul partition` command. ### `create` The `create` subcommand sends a request to the server to create a new admin partition. +This subcommand has the following characteristics: + +| Characteristic | Value | +| -------------- | ----- | +| [Required ACLs] | `operator:write` | +| Corresponding HTTP API Endpoint | [\[PUT\] /partition](/api-docs/admin-partitions#create-a-partition) | +| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | + +#### Usage ```shell-session consul partition create @@ -95,7 +107,20 @@ $ consul partition create -name "webdev" -description "Partition for admin of we ### `write` -The `write` subcommand sends a request to the server to create a new admin partition or update an existing partition from its full definition. You can specify an admin partition definition file or use values from `stdin`. +The `write` subcommand sends a request to the server to create a new admin partition or update an existing partition from its full definition. +This subcommand has the following characteristics: + +| Characteristic | Value | +| -------------- | ----- | +| [Required ACLs] | `operator:write` | +| Corresponding HTTP API Endpoint | [\[PUT\] /partition/:name](/api-docs/admin-partitions#update-a-partition) | +| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | + +#### Usage + +You can specify an admin partition definition file or use values from `stdin` +in either JSON or HCL format. Use the following syntax to write from file: @@ -109,7 +134,7 @@ Use the following syntax to write from `stdin`: consul partition write - ``` -The definition file or `stdin` values can be provided in JSON or HCL format. Refer to the [Admin Partition Definition](#partition-definition) section for details about the supported parameters. +Refer to the [Admin Partition Definition](#partition-definition) section for details about the supported parameters. You can specify the following options: @@ -134,6 +159,16 @@ $ consul partition write -format json -show-meta - <<< 'name = "webdev-bu" descr ### `read` The `read` subcommand sends a request to the server to read the configuration for the specified partition and print it to the console. 
+This subcommand has the following characteristics: + +| Characteristic | Value | +| -------------- | ----- | +| [Required ACLs] | `operator:read`; however, a non-anonymous token can always read its own partition | +| Corresponding HTTP API Endpoint | [\[GET\] /partition/:name](/api-docs/admin-partitions#read-a-partition) | +| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | + +#### Usage ```shell-session consul partition read @@ -161,6 +196,16 @@ consul partition read -format json -meta webdev ### `list` The `list` subcommand prints existing admin partitions to the console. +This subcommand has the following characteristics: + +| Characteristic | Value | +| -------------- | ----- | +| [Required ACLs] | `operator:read` | +| Corresponding HTTP API Endpoint | [\[GET\] /partitions](/api-docs/admin-partitions#list-all-partitions) | +| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | + +#### Usage ```shell-session consul partition list @@ -202,6 +247,16 @@ $ consul partition list -format json -show-meta ### `delete` The `delete` subcommand sends a request to the server to remove the specified partition. +This subcommand has the following characteristics: + +| Characteristic | Value | +| -------------- | ----- | +| [Required ACLs] | `operator:write` | +| Corresponding HTTP API Endpoint | [\[DELETE\] /partitions](/api-docs/admin-partitions#delete-a-partition) | +| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | + +#### Usage ```shell-session $ consul partition delete From 44de9aaf4b56114e13b4498047d067ea20ad7a30 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 6 Apr 2022 15:18:54 -0700 Subject: [PATCH 088/107] docs: remove partition subcommand usage headings --- website/content/commands/partition.mdx | 33 ++------------------------ 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/website/content/commands/partition.mdx b/website/content/commands/partition.mdx index c6e1ed019..c04e33295 100644 --- a/website/content/commands/partition.mdx +++ b/website/content/commands/partition.mdx @@ -77,12 +77,6 @@ This subcommand has the following characteristics: | [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | | [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -#### Usage - -```shell-session -consul partition create -``` - The admin partition is created according to the values specified in the options. You can specify the following options: | Option | Description | Default | Required | @@ -107,7 +101,7 @@ $ consul partition create -name "webdev" -description "Partition for admin of we ### `write` -The `write` subcommand sends a request to the server to create a new admin partition or update an existing partition from its full definition. +The `write` subcommand sends a request to the server to create a new admin partition or update an existing partition from its full definition. You can specify an admin partition definition file or use values from `stdin`. 
This subcommand has the following characteristics: | Characteristic | Value | @@ -117,11 +111,6 @@ This subcommand has the following characteristics: | [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | | [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -#### Usage - -You can specify an admin partition definition file or use values from `stdin` -in either JSON or HCL format. - Use the following syntax to write from file: ```shell-session @@ -134,7 +123,7 @@ Use the following syntax to write from `stdin`: consul partition write - ``` -Refer to the [Admin Partition Definition](#partition-definition) section for details about the supported parameters. +The definition file or `stdin` values can be provided in JSON or HCL format. Refer to the [Admin Partition Definition](#admin-partition-definition) section for details about the supported parameters. You can specify the following options: @@ -168,12 +157,6 @@ This subcommand has the following characteristics: | [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | | [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -#### Usage - -```shell-session -consul partition read -``` - The admin partition is created according to the values specified in the options. You can specify the following options: | Option | Description | Default | Required | @@ -205,12 +188,6 @@ This subcommand has the following characteristics: | [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | | [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -#### Usage - -```shell-session -consul partition list -``` - The admin partition is created according to the values specified in the options. You can specify the following options: | Option | Description | Default | Required | @@ -256,12 +233,6 @@ This subcommand has the following characteristics: | [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | | [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -#### Usage - -```shell-session -$ consul partition delete -``` - In the following example, the `webdev-bu` partition is deleted: ```shell-session From dd1258498127404bb7579ac3bc9eacc992feb6c9 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Thu, 24 Feb 2022 20:31:25 -0800 Subject: [PATCH 089/107] docs: restructure partition API characteristics The existing characteristics were restructured into a list. The corresponding CLI command characteristic was added. --- website/content/api-docs/admin-partitions.mdx | 148 ++++++++---------- 1 file changed, 61 insertions(+), 87 deletions(-) diff --git a/website/content/api-docs/admin-partitions.mdx b/website/content/api-docs/admin-partitions.mdx index 4d87bde4d..6d8124deb 100644 --- a/website/content/api-docs/admin-partitions.mdx +++ b/website/content/api-docs/admin-partitions.mdx @@ -4,6 +4,8 @@ page_title: Admin Partition - HTTP API description: The /partition endpoints allow for managing Consul Enterprise Admin Partitions. --- +@include 'http_api_and_cli_characteristics_links.mdx' + # Admin Partition - HTTP API @@ -13,23 +15,18 @@ The functionality described here is available only in ## Create a Partition -This endpoint creates a new Partition. 
+This endpoint creates a new partition and has the following characteristics: -| Method | Path | Produces | -| ------ | ------------ | ------------------ | -| `PUT` | `/partition` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/api-docs/features/blocking), -[consistency modes](/api-docs/features/consistency), -[agent caching](/api-docs/features/caching), and -[required ACLs](/api#authentication). - -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | ----------------- | ------------- | ---------------- | -| `NO` | `none` | `none` | `operator:write` | - -The corresponding CLI command is [`consul partition create`](/commands/partition#create). +| Characteristic | Value | +| -------------- | ----- | +| [HTTP Method] | `PUT` | +| [URL Path] | `/partition` | +| [Response Type] | `application/json` | +| [Required ACLs] | `operator:write` | +| Corresponding CLI Command | [`consul partition create`](/commands/partition#create) | +| [Consistency Modes] | N/A | +| [Blocking Queries] | N/A | +| [Agent Caching] | N/A | ### JSON Request Body Schema @@ -69,25 +66,18 @@ $ curl ---request PUT \ ## Read a Partition -This endpoint reads a Partition with the given name. +This endpoint reads a partition with the given name and has the following characteristics: -| Method | Path | Produces | -| ------ | ------------------ | ------------------ | -| `GET` | `/partition/:name` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/api-docs/features/blocking), -[consistency modes](/api-docs/features/consistency), -[agent caching](/api-docs/features/caching), and -[required ACLs](/api#authentication). - -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | ----------------- | ------------- | ------------------------------------- | -| `NO` | `consistent` | `none` | `operator:read` or `none`1 | - -1 A non-anonymous token can read its own partition. - -The corresponding CLI command is [`consul partition read`](/commands/partition#read). +| Characteristic | Value | +| -------------- | ----- | +| [HTTP Method] | `GET` | +| [URL Path] | `/partition/:name` | +| [Response Type] | `application/json` | +| [Required ACLs] | `operator:read`; however, a non-anonymous token can always read its own partition | +| Corresponding CLI Command | [`consul partition read`](/commands/partition#read) | +| [Consistency Modes] | `default`, `consistent` | +| [Blocking Queries] | no | +| [Agent Caching] | no | ### Path Parameters @@ -100,7 +90,7 @@ $ curl --header "X-Consul-Token: b23b3cad-5ea1-4413-919e-c76884b9ad60" \ http://127.0.0.1:8500/v1/partition/na-west ``` -### SampleResponse +### Sample Response ```json { @@ -113,23 +103,18 @@ $ curl --header "X-Consul-Token: b23b3cad-5ea1-4413-919e-c76884b9ad60" \ ## Update a Partition -This endpoint updates a Partition description. +This endpoint updates a partition description and has the following characteristics: -| Method | Path | Produces | -| ------ | ------------------ | ------------------ | -| `PUT` | `/partition/:name` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/api-docs/features/blocking), -[consistency modes](/api-docs/features/consistency), -[agent caching](/api-docs/features/caching), and -[required ACLs](/api#authentication). 
- -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | ----------------- | ------------- | ---------------- | -| `NO` | `none` | `none` | `operator:write` | - -The corresponding CLI command is [`consul partition write`](/commands/partition#write). +| Characteristic | Value | +| -------------- | ----- | +| [HTTP Method] | `PUT` | +| [URL Path] | `/partition/:name` | +| [Response Type] | `application/json` | +| [Required ACLs] | `operator:write` | +| Corresponding CLI Command | [`consul partition write`](/commands/partition#write) | +| [Consistency Modes] | N/A | +| [Blocking Queries] | N/A | +| [Agent Caching] | N/A | ### Path Parameters @@ -173,31 +158,25 @@ $ curl --request PUT \ ## Delete a Partition -This endpoint marks a Partition for deletion. Once marked Consul will +This endpoint marks a partition for deletion. Once marked Consul will deleted all the associated partitioned data in the background. Only once -all associated data has been deleted will the Partition actually disappear. +all associated data has been deleted will the partition actually disappear. Until then, further reads can be performed on the partition and a `DeletedAt` -field will now be populated with the timestamp of when the Partition was +field will now be populated with the timestamp of when the partition was marked for deletion. -| Method | Path | Produces | -| -------- | ------------------ | -------- | -| `DELETE` | `/partition/:name` | N/A | +This endpoint has the following characteristics: -This endpoint will return no data. Success or failure is indicated by the status -code returned. - -The table below shows this endpoint's support for -[blocking queries](/api-docs/features/blocking), -[consistency modes](/api-docs/features/consistency), -[agent caching](/api-docs/features/caching), and -[required ACLs](/api#authentication). - -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | ----------------- | ------------- | ---------------- | -| `NO` | `none` | `none` | `operator:write` | - -The corresponding CLI command is [`consul partition delete`](/commands/partition#delete). +| Characteristic | Value | +| -------------- | ----- | +| [HTTP Method] | `DELETE` | +| [URL Path] | `/partition/:name` | +| [Response Type] | none; success or failure is indicated by the HTTP response status code | +| [Required ACLs] | `operator:write` | +| Corresponding CLI Command | [`consul partition delete`](/commands/partition#delete) | +| [Consistency Modes] | N/A | +| [Blocking Queries] | N/A | +| [Agent Caching] | N/A | ### Path Parameters @@ -225,23 +204,18 @@ $ curl --request DELETE \ ## List all Partitions -This endpoint lists all the Partitions. +This endpoint lists all the partitions and has the following characteristics: -| Method | Path | Produces | -| ------ | ------------- | ------------------ | -| `GET` | `/partitions` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/api-docs/features/blocking), -[consistency modes](/api-docs/features/consistency), -[agent caching](/api-docs/features/caching), and -[required ACLs](/api#authentication). - -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | ----------------- | ------------- | --------------- | -| `NO` | `consistent` | `none` | `operator:read` | - -The corresponding CLI command is [`consul partition list`](/commands/partition#list). 
+| Characteristic | Value | +| -------------- | ----- | +| [HTTP Method] | `GET` | +| [URL Path] | `/partitions` | +| [Response Type] | `application/json` | +| [Required ACLs] | `operator:read` | +| Corresponding CLI Command | [`consul partition list`](/commands/partition#list) | +| [Consistency Modes] | `default`, `consistent` | +| [Blocking Queries] | no | +| [Agent Caching] | no | ### Sample Request From dd81f6a76fce90c578a0a34e2fea536697a6fda2 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 20 Jul 2022 15:56:31 -0700 Subject: [PATCH 090/107] docs: adjust HTTP API/CLI characteristics tables --- website/content/api-docs/admin-partitions.mdx | 70 +++++++++---------- website/content/commands/partition.mdx | 30 ++++---- ...http_api_and_cli_characteristics_links.mdx | 11 ++- 3 files changed, 54 insertions(+), 57 deletions(-) diff --git a/website/content/api-docs/admin-partitions.mdx b/website/content/api-docs/admin-partitions.mdx index 6d8124deb..7078cbfbf 100644 --- a/website/content/api-docs/admin-partitions.mdx +++ b/website/content/api-docs/admin-partitions.mdx @@ -19,14 +19,14 @@ This endpoint creates a new partition and has the following characteristics: | Characteristic | Value | | -------------- | ----- | -| [HTTP Method] | `PUT` | -| [URL Path] | `/partition` | -| [Response Type] | `application/json` | +| HTTP method | `PUT` | +| URL path | `/v1/partition` | +| Response type | `application/json` | | [Required ACLs] | `operator:write` | -| Corresponding CLI Command | [`consul partition create`](/commands/partition#create) | -| [Consistency Modes] | N/A | -| [Blocking Queries] | N/A | -| [Agent Caching] | N/A | +| Corresponding CLI command | [`consul partition create`](/commands/partition#create) | +| [Consistency modes] | N/A | +| [Blocking queries] | N/A | +| [Agent caching] | N/A | ### JSON Request Body Schema @@ -70,14 +70,14 @@ This endpoint reads a partition with the given name and has the following charac | Characteristic | Value | | -------------- | ----- | -| [HTTP Method] | `GET` | -| [URL Path] | `/partition/:name` | -| [Response Type] | `application/json` | +| HTTP method | `GET` | +| URL path | `/v1/partition/:name` | +| Response type | `application/json` | | [Required ACLs] | `operator:read`; however, a non-anonymous token can always read its own partition | -| Corresponding CLI Command | [`consul partition read`](/commands/partition#read) | -| [Consistency Modes] | `default`, `consistent` | -| [Blocking Queries] | no | -| [Agent Caching] | no | +| Corresponding CLI command | [`consul partition read`](/commands/partition#read) | +| [Consistency modes] | `default`, `consistent` | +| [Blocking queries] | No | +| [Agent caching] | No | ### Path Parameters @@ -107,14 +107,14 @@ This endpoint updates a partition description and has the following characterist | Characteristic | Value | | -------------- | ----- | -| [HTTP Method] | `PUT` | -| [URL Path] | `/partition/:name` | -| [Response Type] | `application/json` | +| HTTP method | `PUT` | +| URL path | `/v1/partition/:name` | +| Response type | `application/json` | | [Required ACLs] | `operator:write` | -| Corresponding CLI Command | [`consul partition write`](/commands/partition#write) | -| [Consistency Modes] | N/A | -| [Blocking Queries] | N/A | -| [Agent Caching] | N/A | +| Corresponding CLI command | [`consul partition write`](/commands/partition#write) | +| [Consistency modes] | N/A | +| [Blocking queries] | N/A | +| [Agent caching] | N/A | ### Path Parameters @@ -169,14 +169,14 @@ This endpoint has the 
following characteristics: | Characteristic | Value | | -------------- | ----- | -| [HTTP Method] | `DELETE` | -| [URL Path] | `/partition/:name` | -| [Response Type] | none; success or failure is indicated by the HTTP response status code | +| HTTP method | `DELETE` | +| URL path | `/v1/partition/:name` | +| Response type | none; success or failure is indicated by the HTTP response status code | | [Required ACLs] | `operator:write` | -| Corresponding CLI Command | [`consul partition delete`](/commands/partition#delete) | -| [Consistency Modes] | N/A | -| [Blocking Queries] | N/A | -| [Agent Caching] | N/A | +| Corresponding CLI command | [`consul partition delete`](/commands/partition#delete) | +| [Consistency modes] | N/A | +| [Blocking queries] | N/A | +| [Agent caching] | N/A | ### Path Parameters @@ -208,14 +208,14 @@ This endpoint lists all the partitions and has the following characteristics: | Characteristic | Value | | -------------- | ----- | -| [HTTP Method] | `GET` | -| [URL Path] | `/partitions` | -| [Response Type] | `application/json` | +| HTTP method | `GET` | +| URL path | `/v1/partitions` | +| Response type | `application/json` | | [Required ACLs] | `operator:read` | -| Corresponding CLI Command | [`consul partition list`](/commands/partition#list) | -| [Consistency Modes] | `default`, `consistent` | -| [Blocking Queries] | no | -| [Agent Caching] | no | +| Corresponding CLI command | [`consul partition list`](/commands/partition#list) | +| [Consistency modes] | `default`, `consistent` | +| [Blocking queries] | No | +| [Agent caching] | No | ### Sample Request diff --git a/website/content/commands/partition.mdx b/website/content/commands/partition.mdx index c04e33295..df9f4067f 100644 --- a/website/content/commands/partition.mdx +++ b/website/content/commands/partition.mdx @@ -73,9 +73,9 @@ This subcommand has the following characteristics: | Characteristic | Value | | -------------- | ----- | | [Required ACLs] | `operator:write` | -| Corresponding HTTP API Endpoint | [\[PUT\] /partition](/api-docs/admin-partitions#create-a-partition) | -| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| Corresponding HTTP API endpoint | [\[PUT\] /v1/partition](/api-docs/admin-partitions#create-a-partition) | +| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | The admin partition is created according to the values specified in the options. 
You can specify the following options: @@ -107,9 +107,9 @@ This subcommand has the following characteristics: | Characteristic | Value | | -------------- | ----- | | [Required ACLs] | `operator:write` | -| Corresponding HTTP API Endpoint | [\[PUT\] /partition/:name](/api-docs/admin-partitions#update-a-partition) | -| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| Corresponding HTTP API endpoint | [\[PUT\] /v1/partition/:name](/api-docs/admin-partitions#update-a-partition) | +| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | Use the following syntax to write from file: @@ -153,9 +153,9 @@ This subcommand has the following characteristics: | Characteristic | Value | | -------------- | ----- | | [Required ACLs] | `operator:read`; however, a non-anonymous token can always read its own partition | -| Corresponding HTTP API Endpoint | [\[GET\] /partition/:name](/api-docs/admin-partitions#read-a-partition) | -| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| Corresponding HTTP API endpoint | [\[GET\] /v1/partition/:name](/api-docs/admin-partitions#read-a-partition) | +| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | The admin partition is created according to the values specified in the options. You can specify the following options: @@ -184,9 +184,9 @@ This subcommand has the following characteristics: | Characteristic | Value | | -------------- | ----- | | [Required ACLs] | `operator:read` | -| Corresponding HTTP API Endpoint | [\[GET\] /partitions](/api-docs/admin-partitions#list-all-partitions) | -| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| Corresponding HTTP API endpoint | [\[GET\] /v1/partitions](/api-docs/admin-partitions#list-all-partitions) | +| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | The admin partition is created according to the values specified in the options. 
You can specify the following options: @@ -229,9 +229,9 @@ This subcommand has the following characteristics: | Characteristic | Value | | -------------- | ----- | | [Required ACLs] | `operator:write` | -| Corresponding HTTP API Endpoint | [\[DELETE\] /partitions](/api-docs/admin-partitions#delete-a-partition) | -| [Blocking Queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent Caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| Corresponding HTTP API endpoint | [\[DELETE\] /v1/partitions](/api-docs/admin-partitions#delete-a-partition) | +| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | +| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | In the following example, the `webdev-bu` partition is deleted: diff --git a/website/content/partials/http_api_and_cli_characteristics_links.mdx b/website/content/partials/http_api_and_cli_characteristics_links.mdx index 4669c1ec1..0cbb23702 100644 --- a/website/content/partials/http_api_and_cli_characteristics_links.mdx +++ b/website/content/partials/http_api_and_cli_characteristics_links.mdx @@ -1,9 +1,6 @@ -[HTTP Method]: /api-docs#http-methods -[URL Path]: /api-docs#version-prefix -[Response Type]: /api-docs#formatted-json-output -[Required ACLs]: /docs/security/acl/acl-system -[Blocking Queries]: /api-docs/features/blocking -[Consistency Modes]: /api-docs/features/consistency -[Agent Caching]: /api-docs/features/caching +[Required ACLs]: /docs/security/acl +[Blocking queries]: /api-docs/features/blocking +[Consistency modes]: /api-docs/features/consistency +[Agent caching]: /api-docs/features/caching From 641bf837b50d74a85b64f9155a0e375e3e2c7ed6 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Mon, 25 Jul 2022 15:31:07 -0700 Subject: [PATCH 091/107] docs: remove unnecessary partition CLI cmd info --- website/content/commands/partition.mdx | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/website/content/commands/partition.mdx b/website/content/commands/partition.mdx index df9f4067f..5b9fed9a3 100644 --- a/website/content/commands/partition.mdx +++ b/website/content/commands/partition.mdx @@ -74,8 +74,6 @@ This subcommand has the following characteristics: | -------------- | ----- | | [Required ACLs] | `operator:write` | | Corresponding HTTP API endpoint | [\[PUT\] /v1/partition](/api-docs/admin-partitions#create-a-partition) | -| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | The admin partition is created according to the values specified in the options. 
You can specify the following options: @@ -108,8 +106,6 @@ This subcommand has the following characteristics: | -------------- | ----- | | [Required ACLs] | `operator:write` | | Corresponding HTTP API endpoint | [\[PUT\] /v1/partition/:name](/api-docs/admin-partitions#update-a-partition) | -| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | Use the following syntax to write from file: @@ -154,8 +150,6 @@ This subcommand has the following characteristics: | -------------- | ----- | | [Required ACLs] | `operator:read`; however, a non-anonymous token can always read its own partition | | Corresponding HTTP API endpoint | [\[GET\] /v1/partition/:name](/api-docs/admin-partitions#read-a-partition) | -| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | The admin partition is created according to the values specified in the options. You can specify the following options: @@ -185,8 +179,6 @@ This subcommand has the following characteristics: | -------------- | ----- | | [Required ACLs] | `operator:read` | | Corresponding HTTP API endpoint | [\[GET\] /v1/partitions](/api-docs/admin-partitions#list-all-partitions) | -| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | The admin partition is created according to the values specified in the options. You can specify the following options: @@ -230,8 +222,6 @@ This subcommand has the following characteristics: | -------------- | ----- | | [Required ACLs] | `operator:write` | | Corresponding HTTP API endpoint | [\[DELETE\] /v1/partitions](/api-docs/admin-partitions#delete-a-partition) | -| [Blocking queries] | Not supported from commands, but may be from the corresponding HTTP API endpoint | -| [Agent caching] | Not supported from commands, but may be from the corresponding HTTP API endpoint | In the following example, the `webdev-bu` partition is deleted: From 2a8280a5187c47dc22202cf7f783e0bf38f9cc6e Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Mon, 25 Jul 2022 16:43:24 -0600 Subject: [PATCH 092/107] build: add a build job to build and push UBI images to DockerHub (#13808) --- .github/workflows/build.yml | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0320a7bb1..fc2506abf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -254,8 +254,8 @@ jobs: docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-${{ github.sha }} smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} - build-docker-redhat: - name: Docker Build UBI Image for RedHat + build-docker-ubi-redhat: + name: Docker Build UBI Image for RedHat Registry needs: - get-product-version - build @@ -274,6 +274,39 @@ jobs: redhat_tag: scan.connect.redhat.com/ospid-60f9fdbec3a80eac643abedf/${{env.repo}}:${{env.version}}-ubi smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} + build-docker-ubi-dockerhub: + name: Docker Build UBI Image for DockerHub + needs: + - get-product-version + - build + runs-on: ubuntu-latest + env: + repo: 
${{github.event.repository.name}} + version: ${{needs.get-product-version.outputs.product-version}} + + steps: + - uses: actions/checkout@v2 + + # Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix + # This naming convention will be used ONLY for per-commit dev images + - name: Set docker dev tag + run: | + version="${{ env.version }}" + echo "dev_tag=${version%.*}-dev" >> $GITHUB_ENV + + - uses: hashicorp/actions-docker-build@v1 + with: + version: ${{env.version}} + target: ubi + arch: amd64 + tags: | + docker.io/hashicorp/${{env.repo}}:${{env.version}}-ubi + public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}-ubi + dev_tags: | + docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-ubi + docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-ubi-${{ github.sha }} + smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} + verify-linux: needs: - get-product-version From 5d4209eaf87e702c83131b71212b87ac549b9184 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Mon, 25 Jul 2022 16:08:03 -0700 Subject: [PATCH 093/107] Rename receive to recv in tracker (#13896) Because it's shorter --- .../services/peerstream/stream_resources.go | 10 +-- .../services/peerstream/stream_test.go | 64 +++++++++---------- .../services/peerstream/stream_tracker.go | 36 +++++------ 3 files changed, 54 insertions(+), 56 deletions(-) diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 000e26ca9..1feb7f01d 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -265,11 +265,11 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { if err == io.EOF { logger.Info("stream ended by peer") - status.TrackReceiveError(err.Error()) + status.TrackRecvError(err.Error()) return } logger.Error("failed to receive from stream", "error", err) - status.TrackReceiveError(err.Error()) + status.TrackRecvError(err.Error()) return } }() @@ -459,9 +459,9 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp, logger) if err != nil { logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID) - status.TrackReceiveError(err.Error()) + status.TrackRecvError(err.Error()) } else { - status.TrackReceiveResourceSuccess() + status.TrackRecvResourceSuccess() } if err := streamSend(reply); err != nil { @@ -482,7 +482,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { } if msg.GetHeartbeat() != nil { - status.TrackReceiveHeartbeat() + status.TrackRecvHeartbeat() // Reset the heartbeat timeout by creating a new context. // We first must cancel the old context so there's no leaks. 
This is safe to do because we're only diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index d9e18a1e6..174ecf59f 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -475,11 +475,11 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { api := structs.NewServiceName("api", nil) expect := Status{ - Connected: true, - LastAck: lastSendSuccess, - LastNack: lastNack, - LastNackMessage: lastNackMsg, - LastReceiveResourceSuccess: lastRecvResourceSuccess, + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastRecvResourceSuccess: lastRecvResourceSuccess, ImportedServices: map[string]struct{}{ api.String(): {}, }, @@ -534,13 +534,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { api := structs.NewServiceName("api", nil) expect := Status{ - Connected: true, - LastAck: lastSendSuccess, - LastNack: lastNack, - LastNackMessage: lastNackMsg, - LastReceiveResourceSuccess: lastRecvResourceSuccess, - LastReceiveError: lastRecvError, - LastReceiveErrorMessage: lastRecvErrorMsg, + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastRecvResourceSuccess: lastRecvResourceSuccess, + LastRecvError: lastRecvError, + LastRecvErrorMessage: lastRecvErrorMsg, ImportedServices: map[string]struct{}{ api.String(): {}, }, @@ -553,27 +553,27 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) }) - var lastReceiveHeartbeat time.Time + var lastRecvHeartbeat time.Time testutil.RunStep(t, "receives heartbeat", func(t *testing.T) { resp := &pbpeerstream.ReplicationMessage{ Payload: &pbpeerstream.ReplicationMessage_Heartbeat_{ Heartbeat: &pbpeerstream.ReplicationMessage_Heartbeat{}, }, } - lastReceiveHeartbeat = it.FutureNow(1) + lastRecvHeartbeat = it.FutureNow(1) err := client.Send(resp) require.NoError(t, err) api := structs.NewServiceName("api", nil) expect := Status{ - Connected: true, - LastAck: lastSendSuccess, - LastNack: lastNack, - LastNackMessage: lastNackMsg, - LastReceiveResourceSuccess: lastRecvResourceSuccess, - LastReceiveError: lastRecvError, - LastReceiveErrorMessage: lastRecvErrorMsg, - LastReceiveHeartbeat: lastReceiveHeartbeat, + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastRecvResourceSuccess: lastRecvResourceSuccess, + LastRecvError: lastRecvError, + LastRecvErrorMessage: lastRecvErrorMsg, + LastRecvHeartbeat: lastRecvHeartbeat, ImportedServices: map[string]struct{}{ api.String(): {}, }, @@ -596,16 +596,16 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { api := structs.NewServiceName("api", nil) expect := Status{ - Connected: false, - DisconnectErrorMessage: "stream ended unexpectedly", - LastAck: lastSendSuccess, - LastNack: lastNack, - LastNackMessage: lastNackMsg, - DisconnectTime: disconnectTime, - LastReceiveResourceSuccess: lastRecvResourceSuccess, - LastReceiveError: lastRecvError, - LastReceiveErrorMessage: lastRecvErrorMsg, - LastReceiveHeartbeat: lastReceiveHeartbeat, + Connected: false, + DisconnectErrorMessage: "stream ended unexpectedly", + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + DisconnectTime: disconnectTime, + LastRecvResourceSuccess: lastRecvResourceSuccess, + LastRecvError: lastRecvError, + LastRecvErrorMessage: lastRecvErrorMsg, + LastRecvHeartbeat: lastRecvHeartbeat, 
ImportedServices: map[string]struct{}{ api.String(): {}, }, diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index 8632f36ec..f7a451595 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -167,21 +167,19 @@ type Status struct { // LastSendErrorMessage tracks the last error message when sending into the stream. LastSendErrorMessage string - // LastReceiveHeartbeat tracks when we last received a heartbeat from our peer. - LastReceiveHeartbeat time.Time + // LastRecvHeartbeat tracks when we last received a heartbeat from our peer. + LastRecvHeartbeat time.Time - // LastReceiveResourceSuccess tracks the time we last successfully stored a resource replicated FROM the peer. - LastReceiveResourceSuccess time.Time + // LastRecvResourceSuccess tracks the time we last successfully stored a resource replicated FROM the peer. + LastRecvResourceSuccess time.Time - // LastReceiveError tracks either: + // LastRecvError tracks either: // - The time we failed to store a resource replicated FROM the peer. // - The time of the last error when receiving from the stream. - LastReceiveError time.Time + LastRecvError time.Time - // LastReceiveError tracks either: - // - The error message when we failed to store a resource replicated FROM the peer. - // - The last error message when receiving from the stream. - LastReceiveErrorMessage string + // LastRecvErrorMessage tracks the last error message when receiving from the stream. + LastRecvErrorMessage string // TODO(peering): consider keeping track of imported and exported services thru raft // ImportedServices keeps track of which service names are imported for the peer @@ -225,24 +223,24 @@ func (s *MutableStatus) TrackSendError(error string) { s.mu.Unlock() } -// TrackReceiveResourceSuccess tracks receiving a replicated resource. -func (s *MutableStatus) TrackReceiveResourceSuccess() { +// TrackRecvResourceSuccess tracks receiving a replicated resource. +func (s *MutableStatus) TrackRecvResourceSuccess() { s.mu.Lock() - s.LastReceiveResourceSuccess = s.timeNow().UTC() + s.LastRecvResourceSuccess = s.timeNow().UTC() s.mu.Unlock() } -// TrackReceiveHeartbeat tracks receiving a heartbeat from our peer. -func (s *MutableStatus) TrackReceiveHeartbeat() { +// TrackRecvHeartbeat tracks receiving a heartbeat from our peer. 
+func (s *MutableStatus) TrackRecvHeartbeat() { s.mu.Lock() - s.LastReceiveHeartbeat = s.timeNow().UTC() + s.LastRecvHeartbeat = s.timeNow().UTC() s.mu.Unlock() } -func (s *MutableStatus) TrackReceiveError(error string) { +func (s *MutableStatus) TrackRecvError(error string) { s.mu.Lock() - s.LastReceiveError = s.timeNow().UTC() - s.LastReceiveErrorMessage = error + s.LastRecvError = s.timeNow().UTC() + s.LastRecvErrorMessage = error s.mu.Unlock() } From 03ea6517c9a2f0d93f676af9c5e134d735ed7b8f Mon Sep 17 00:00:00 2001 From: Nitya Dhanushkodi Date: Mon, 25 Jul 2022 16:33:47 -0700 Subject: [PATCH 094/107] peering: remove validation that forces peering token server addresses to be an IP, allow hostname based addresses (#13874) --- agent/rpc/peering/testing.go | 1 + agent/rpc/peering/validate.go | 6 +----- agent/rpc/peering/validate_test.go | 19 +++++++++---------- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/agent/rpc/peering/testing.go b/agent/rpc/peering/testing.go index 04f1bb223..577f78229 100644 --- a/agent/rpc/peering/testing.go +++ b/agent/rpc/peering/testing.go @@ -32,6 +32,7 @@ not valid ` var validAddress = "1.2.3.4:80" +var validHostnameAddress = "foo.bar.baz:80" var validServerName = "server.consul" diff --git a/agent/rpc/peering/validate.go b/agent/rpc/peering/validate.go index 32a3d5d29..340e4c5ad 100644 --- a/agent/rpc/peering/validate.go +++ b/agent/rpc/peering/validate.go @@ -3,7 +3,6 @@ package peering import ( "fmt" "net" - "net/netip" "strconv" "github.com/hashicorp/consul/agent/connect" @@ -25,7 +24,7 @@ func validatePeeringToken(tok *structs.PeeringToken) error { return errPeeringTokenEmptyServerAddresses } for _, addr := range tok.ServerAddresses { - host, portRaw, err := net.SplitHostPort(addr) + _, portRaw, err := net.SplitHostPort(addr) if err != nil { return &errPeeringInvalidServerAddress{addr} } @@ -37,9 +36,6 @@ func validatePeeringToken(tok *structs.PeeringToken) error { if port < 1 || port > 65535 { return &errPeeringInvalidServerAddress{addr} } - if _, err := netip.ParseAddr(host); err != nil { - return &errPeeringInvalidServerAddress{addr} - } } // TODO(peering): validate name matches SNI? diff --git a/agent/rpc/peering/validate_test.go b/agent/rpc/peering/validate_test.go index 1f0660c8f..06e893a65 100644 --- a/agent/rpc/peering/validate_test.go +++ b/agent/rpc/peering/validate_test.go @@ -53,16 +53,6 @@ func TestValidatePeeringToken(t *testing.T) { "1.2.3.4", }, }, - { - name: "invalid address IP", - token: &structs.PeeringToken{ - CA: []string{validCA}, - ServerAddresses: []string{"foo.bar.baz"}, - }, - wantErr: &errPeeringInvalidServerAddress{ - "foo.bar.baz", - }, - }, { name: "invalid server name", token: &structs.PeeringToken{ @@ -89,6 +79,15 @@ func TestValidatePeeringToken(t *testing.T) { PeerID: validPeerID, }, }, + { + name: "valid token with hostname address", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{validHostnameAddress}, + ServerName: validServerName, + PeerID: validPeerID, + }, + }, } for _, tc := range tt { From 0a66d0188d441fc7166a6b6305a8d73bb31089ac Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Mon, 25 Jul 2022 18:00:48 -0700 Subject: [PATCH 095/107] peering: prevent peering in same partition (#13851) Co-authored-by: Chris S. 
Kim --- agent/rpc/peering/service.go | 21 +++++++++++++++++++++ agent/rpc/peering/service_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 9a471e56a..86a8b11dc 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -346,6 +346,11 @@ func (s *Server) Establish( return nil, err } + // we don't want to default req.Partition unlike because partitions are empty in OSS + if err := s.validatePeeringInPartition(tok.PeerID, req.Partition); err != nil { + return nil, err + } + var id string if peering == nil { id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID) @@ -395,6 +400,22 @@ func (s *Server) Establish( return resp, nil } +// validatePeeringInPartition makes sure that we don't create a peering in the same partition. We validate by looking at +// the remotePeerID from the PeeringToken and looking up for a peering in the partition. If there is one and the +// request partition is the same, then we are attempting to peer within the partition, which we shouldn't. +func (s *Server) validatePeeringInPartition(remotePeerID, partition string) error { + _, peering, err := s.Backend.Store().PeeringReadByID(nil, remotePeerID) + if err != nil { + return fmt.Errorf("cannot read peering by ID: %w", err) + } + + if peering != nil && peering.Partition == partition { + return fmt.Errorf("cannot create a peering within the same partition (ENT) or cluster (OSS)") + } + + return nil +} + // OPTIMIZE: Handle blocking queries func (s *Server) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequest) (*pbpeering.PeeringReadResponse, error) { if !s.Config.PeeringEnabled { diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index ca3553a1b..883f4fcc0 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -314,6 +314,30 @@ func TestPeeringService_Establish(t *testing.T) { } } +// We define a valid peering by a peering that does not occur over the same server addresses +func TestPeeringService_Establish_validPeeringInPartition(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, nil) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := pbpeering.GenerateTokenRequest{PeerName: "peerOne"} + resp, err := client.GenerateToken(ctx, &req) + require.NoError(t, err) + require.NotEmpty(t, resp) + + establishReq := &pbpeering.EstablishRequest{ + PeerName: "peerTwo", + PeeringToken: resp.PeeringToken} + + respE, errE := client.Establish(ctx, establishReq) + require.Error(t, errE) + require.Contains(t, errE.Error(), "cannot create a peering within the same partition (ENT) or cluster (OSS)") + require.Nil(t, respE) +} + func TestPeeringService_Establish_ACLEnforcement(t *testing.T) { validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") validTokenJSON, _ := json.Marshal(&validToken) From bf9045db69c30be58fc1c3d4b45c6e7083201705 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Tue, 26 Jul 2022 15:01:09 +0200 Subject: [PATCH 096/107] ui: add deprecation worfklow addon (#13877) * add ember-cli-deprecation-workflow * Add deprecation workflow configuration This will silence all deprecations by default reducing noise in the test output significantly. 
We can tackle deprecations now one by one but won't have to deal with very verbose console logs anymore. --- .../consul-ui/config/deprecation-workflow.js | 19 +++++++++++ ui/packages/consul-ui/package.json | 3 +- ui/yarn.lock | 34 ++++++++++++++++++- 3 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 ui/packages/consul-ui/config/deprecation-workflow.js diff --git a/ui/packages/consul-ui/config/deprecation-workflow.js b/ui/packages/consul-ui/config/deprecation-workflow.js new file mode 100644 index 000000000..dc412eaec --- /dev/null +++ b/ui/packages/consul-ui/config/deprecation-workflow.js @@ -0,0 +1,19 @@ +/* eslint-disable no-undef */ +self.deprecationWorkflow = self.deprecationWorkflow || {}; +self.deprecationWorkflow.config = { + workflow: [ + { handler: 'silence', matchId: 'ember-cli-page-object.string-properties-on-definition' }, + { handler: 'silence', matchId: 'ember-sinon-qunit.test' }, + { handler: 'silence', matchId: 'ember-qunit.deprecate-legacy-apis' }, + { handler: 'silence', matchId: 'ember-can.can-service' }, + { handler: 'silence', matchId: 'ember-data:model.toJSON' }, + { handler: 'silence', matchId: 'ember-cli-page-object.is-property' }, + { handler: 'silence', matchId: 'ember-views.partial' }, + { handler: 'silence', matchId: 'ember-component.send-action' }, + { handler: 'silence', matchId: 'ember-cli-page-object.multiple' }, + { handler: 'silence', matchId: 'computed-property.override' }, + { handler: 'silence', matchId: 'autotracking.mutation-after-consumption' }, + { handler: 'silence', matchId: 'ember-data:legacy-test-helper-support' }, + { handler: 'silence', matchId: 'ember-data:Model.data' }, + ], +}; diff --git a/ui/packages/consul-ui/package.json b/ui/packages/consul-ui/package.json index e03887cab..7960af1a7 100644 --- a/ui/packages/consul-ui/package.json +++ b/ui/packages/consul-ui/package.json @@ -82,8 +82,8 @@ "consul-hcp": "*", "consul-lock-sessions": "*", "consul-nspaces": "*", - "consul-peerings": "*", "consul-partitions": "*", + "consul-peerings": "*", "css.escape": "^1.5.1", "d3-array": "^2.8.0", "d3-scale": "^3.2.3", @@ -103,6 +103,7 @@ "ember-cli-babel": "^7.17.2", "ember-cli-code-coverage": "^1.0.0-beta.4", "ember-cli-dependency-checker": "^3.2.0", + "ember-cli-deprecation-workflow": "^2.1.0", "ember-cli-flash": "^2.1.1", "ember-cli-htmlbars": "^5.2.0", "ember-cli-inject-live-reload": "^2.0.2", diff --git a/ui/yarn.lock b/ui/yarn.lock index 39b3974a4..1f3f24f35 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -4657,7 +4657,7 @@ broccoli-plugin@^4.0.0, broccoli-plugin@^4.0.1, broccoli-plugin@^4.0.2, broccoli rimraf "^3.0.2" symlink-or-copy "^1.3.1" -broccoli-plugin@^4.0.7: +broccoli-plugin@^4.0.5, broccoli-plugin@^4.0.7: version "4.0.7" resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-4.0.7.tgz#dd176a85efe915ed557d913744b181abe05047db" integrity sha512-a4zUsWtA1uns1K7p9rExYVYG99rdKeGRymW0qOCNkvDPHQxVi3yVyJHhQbM3EZwdt2E0mnhr5e0c/bPpJ7p3Wg== @@ -6549,6 +6549,16 @@ ember-cli-dependency-checker@^3.2.0: resolve "^1.5.0" semver "^5.3.0" +ember-cli-deprecation-workflow@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ember-cli-deprecation-workflow/-/ember-cli-deprecation-workflow-2.1.0.tgz#f0d38ece7ac0ab7b3f83790a3a092e3472f58cff" + integrity sha512-Ay9P9iKMJdY4Gq5XPowh3HqqeAzLfwBRj1oB1ZKkDW1fryZQWBN4pZuRnjnB+3VWZjBnZif5e7Pacc7YNW9hWg== + dependencies: + broccoli-funnel "^3.0.3" + broccoli-merge-trees "^4.2.0" + broccoli-plugin "^4.0.5" + ember-cli-htmlbars "^5.3.2" + ember-cli-flash@^2.1.1: version 
"2.1.1" resolved "https://registry.yarnpkg.com/ember-cli-flash/-/ember-cli-flash-2.1.1.tgz#a1547676d20a8d6f5d0f523228d3e7c8a76486ee" @@ -6626,6 +6636,28 @@ ember-cli-htmlbars@^5.0.0, ember-cli-htmlbars@^5.1.0, ember-cli-htmlbars@^5.1.2, strip-bom "^4.0.0" walk-sync "^2.2.0" +ember-cli-htmlbars@^5.3.2: + version "5.7.2" + resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-5.7.2.tgz#e0cd2fb3c20d85fe4c3e228e6f0590ee1c645ba8" + integrity sha512-Uj6R+3TtBV5RZoJY14oZn/sNPnc+UgmC8nb5rI4P3fR/gYoyTFIZSXiIM7zl++IpMoIrocxOrgt+mhonKphgGg== + dependencies: + "@ember/edition-utils" "^1.2.0" + babel-plugin-htmlbars-inline-precompile "^5.0.0" + broccoli-debug "^0.6.5" + broccoli-persistent-filter "^3.1.2" + broccoli-plugin "^4.0.3" + common-tags "^1.8.0" + ember-cli-babel-plugin-helpers "^1.1.1" + ember-cli-version-checker "^5.1.2" + fs-tree-diff "^2.0.1" + hash-for-dep "^1.5.1" + heimdalljs-logger "^0.1.10" + json-stable-stringify "^1.0.1" + semver "^7.3.4" + silent-error "^1.1.1" + strip-bom "^4.0.0" + walk-sync "^2.2.0" + ember-cli-htmlbars@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-6.0.0.tgz#9d0b67c0f107467b6c8ecdc47d64e877489841bf" From f41a754cbe85eba40ee85701d29e42e3afde0cb5 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 26 Jul 2022 15:36:49 +0100 Subject: [PATCH 097/107] ui: Add peering establishment to the peer listing page (#13813) * ui: Add peering establishment to the peer listing page * Remove this.form.reset --- .../app/components/consul/peer/list/index.hbs | 18 ++ .../app/templates/dc/peers/index.hbs | 181 +++++++++++++----- .../app/components/buttons/index.scss | 2 +- .../app/styles/routes/dc/acls/index.scss | 3 + 4 files changed, 156 insertions(+), 48 deletions(-) diff --git a/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs index 30272119e..3282b987c 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs +++ b/ui/packages/consul-peerings/app/components/consul/peer/list/index.hbs @@ -42,6 +42,24 @@ as |item index|> {{#if (can 'delete peer' item=item)}} + + + View + + +{{#if (can "write peer" item=item)}} + + + Regenerate token + + +{{/if}} +

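As context for the peering token change in PATCH 094 above: dropping the `netip.ParseAddr` check means a token's `ServerAddresses` only need to parse as `host:port` with a sane port, so hostnames take the same path as IPs. A minimal, standard-library-only sketch of that style of check follows; the `validateServerAddress` name and the sample addresses are illustrative, not taken from the patch.

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// validateServerAddress mirrors the relaxed check: the host portion may be an
// IP or a hostname; only the host:port shape and the port range are enforced.
func validateServerAddress(addr string) error {
	_, portRaw, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("invalid server address %q: %w", addr, err)
	}
	port, err := strconv.Atoi(portRaw)
	if err != nil || port < 1 || port > 65535 {
		return fmt.Errorf("invalid port in server address %q", addr)
	}
	return nil
}

func main() {
	for _, addr := range []string{"1.2.3.4:80", "foo.bar.baz:80", "no-port"} {
		fmt.Printf("%s -> %v\n", addr, validateServerAddress(addr))
	}
}
```

Running this prints a nil error for both the IP and the hostname form, and a non-nil error for the address that lacks a port.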
      - +

      + {{#if (gt items.length 0)}} {{/if}} + + + + + + + {{did-insert (set this 'create' modal)}} +

      + Add peer connection +

      +
      + + + {{#if modal.opened}} + + + {{did-insert (set this 'form' form)}} + + + + {{/if}} + + + + + +
      + + Add peer connection + +
      + + - - + + + + {{did-insert (set this 'regenerate' modal)}} +

      + Regenerate token +

      +
      + + {{#if this.item}} + + {{did-insert (set this 'regenerateForm' form)}} + + + {{/if}} + + + + +
      + + + - {{!-- TODO: do we need to check permissions here or will we receive an error automatically? --}} - - -

      + {{!-- TODO: do we need to check permissions here or will we receive an error automatically? --}} + + +

      + {{#if (gt items.length 0)}} + No peers found + {{else}} + Welcome to Peers + {{/if}} +

      +
      + {{#if (gt items.length 0)}} - No peers found +

      No peers were found matching that search, or you may not have access to view the peers you are searching for.

      {{else}} - Welcome to Peers +

      + Cluster peering is the recommended way to connect services across or within Consul datacenters. Peering is a one-to-one relationship in which each peer is either an open-source Consul datacenter or a Consul enterprise admin partition. There don't seem to be any peers for this {{if (can "use partitions") "admin partition" "datacenter"}}, or you may not have the peering:read permissions to access this view. +

      {{/if}} -

      -
      - - {{#if (gt items.length 0)}} -

      No peers were found matching that search, or you may not have access to view the peers you are searching for.

      - {{else}} -

      - Cluster peering is the recommended way to connect services across or within Consul datacenters. Peering is a one-to-one relationship in which each peer is either an open-source Consul datacenter or a Consul enterprise admin partition. There don't seem to be any peers for this {{if (can "use partitions") "admin partition" "datacenter"}}, or you may not have the peering:read permissions to access this view. -

      - {{/if}} -
      - - - - -
      -
      -
      +
      + + + + + + +
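The "Add peer connection" modal added above drives the same generate-token / establish flow exercised by the PeeringService code in PATCH 095. A rough end-to-end sketch of that flow against two agents is below; the HTTP paths (`/v1/peering/token`, `/v1/peering/establish`), the JSON field names, the peer names, and the agent addresses are assumptions based on the Consul 1.13 cluster peering API rather than anything taken from this patch.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// postJSON sends a JSON body to a Consul agent HTTP endpoint and decodes the reply.
func postJSON(url string, in, out any) error {
	body, err := json.Marshal(in)
	if err != nil {
		return err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return json.NewDecoder(resp.Body).Decode(out)
}

func main() {
	// Step 1: on the accepting cluster, mint a token for the peer expected to dial us.
	var tok struct{ PeeringToken string }
	if err := postJSON("http://127.0.0.1:8500/v1/peering/token",
		map[string]string{"PeerName": "cluster-02"}, &tok); err != nil {
		panic(err)
	}

	// Step 2: on the dialing cluster, establish the peering using that token.
	var est map[string]any
	if err := postJSON("http://127.0.0.2:8500/v1/peering/establish",
		map[string]string{"PeerName": "cluster-01", "PeeringToken": tok.PeeringToken},
		&est); err != nil {
		panic(err)
	}
	fmt.Println("peering established")
}
```

The token is minted on the accepting side and then handed to the dialing side, which mirrors how the UI splits the workflow into a create modal and a separate regenerate action.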
      diff --git a/ui/packages/consul-ui/app/components/buttons/index.scss b/ui/packages/consul-ui/app/components/buttons/index.scss index 6bf35de27..f20214400 100644 --- a/ui/packages/consul-ui/app/components/buttons/index.scss +++ b/ui/packages/consul-ui/app/components/buttons/index.scss @@ -2,13 +2,13 @@ @import './layout'; button[type='submit'], button.type-submit, +button.type-create, a.type-create { @extend %primary-button; } // TODO: Once we move action-groups to use aria menu we can get rid of // some of this and just use not(aria-haspopup) button[type='reset'], -header .actions button[type='button']:not(.copy-btn), button.type-cancel { @extend %secondary-button; } diff --git a/ui/packages/consul-ui/app/styles/routes/dc/acls/index.scss b/ui/packages/consul-ui/app/styles/routes/dc/acls/index.scss index 54ac890e6..3600be20c 100644 --- a/ui/packages/consul-ui/app/styles/routes/dc/acls/index.scss +++ b/ui/packages/consul-ui/app/styles/routes/dc/acls/index.scss @@ -8,6 +8,9 @@ html[data-route^='dc.acls.index'] .filter-bar { html[data-route^='dc.acls.index'] .filter-bar [role='radiogroup'] { @extend %expanded-single-select; } +html[data-route^='dc.acls.tokens.edit'] header .actions button { + @extend %secondary-button; +} @media #{$--lt-wide-form} { html[data-route^='dc.acls.create'] main header .actions, From 4e5190245e267fe85e0a3ad6ce69c52194fddb5b Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 26 Jul 2022 17:29:37 +0100 Subject: [PATCH 098/107] ui: Make peered intentions read-only (#13814) * ui: Make peered intentions read-only * Replace "" to undefined for SourcePeer so its the same as PeerName * Fixup copypasta * Ensure tests run with no peers --- ui/packages/consul-ui/app/abilities/intention.js | 7 ++++++- .../components/consul/intention/list/table/index.hbs | 12 ++++++++++-- ui/packages/consul-ui/app/models/intention.js | 4 ++-- .../tests/acceptance/dc/intentions/delete.feature | 1 + .../acceptance/dc/intentions/navigation.feature | 1 + .../dc/services/show/intentions/index.feature | 5 ++++- 6 files changed, 24 insertions(+), 6 deletions(-) diff --git a/ui/packages/consul-ui/app/abilities/intention.js b/ui/packages/consul-ui/app/abilities/intention.js index 91f3a013b..a9435f975 100644 --- a/ui/packages/consul-ui/app/abilities/intention.js +++ b/ui/packages/consul-ui/app/abilities/intention.js @@ -4,7 +4,12 @@ export default class IntentionAbility extends BaseAbility { resource = 'intention'; get canWrite() { - return super.canWrite && (typeof this.item === 'undefined' || !this.canViewCRD); + // Peered intentions aren't writable + if(typeof this.item !== 'undefined' && typeof this.item.SourcePeer !== 'undefined') { + return false; + } + return super.canWrite && + (typeof this.item === 'undefined' || !this.canViewCRD); } get canViewCRD() { return (typeof this.item !== 'undefined' && this.item.IsManagedByCRD); diff --git a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs index 03e47c17e..e8190f483 100644 --- a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs @@ -71,7 +71,6 @@ as |item index|> {{/if}} -{{#if (and (or (can "write intention" item=item) (can "view CRD intention" item=item)) (not-eq item.Meta.external-source 'consul-api-gateway'))}}
      + {{else}} +
      + + View + +
      {{/if}}
      -{{/if}} diff --git a/ui/packages/consul-ui/app/models/intention.js b/ui/packages/consul-ui/app/models/intention.js index b074425ea..f7fa85a47 100644 --- a/ui/packages/consul-ui/app/models/intention.js +++ b/ui/packages/consul-ui/app/models/intention.js @@ -1,7 +1,7 @@ import Model, { attr } from '@ember-data/model'; import { computed } from '@ember/object'; import { fragmentArray } from 'ember-data-model-fragments/attributes'; -import { nullValue } from 'consul-ui/decorators/replace'; +import replace, { nullValue } from 'consul-ui/decorators/replace'; export const PRIMARY_KEY = 'uid'; export const SLUG_KEY = 'ID'; @@ -13,7 +13,7 @@ export default class Intention extends Model { @attr('string') Datacenter; @attr('string') Description; - @attr('string') SourcePeer; + @replace('', undefined) @attr('string') SourcePeer; @attr('string', { defaultValue: () => '*' }) SourceName; @attr('string', { defaultValue: () => '*' }) DestinationName; @attr('string', { defaultValue: () => 'default' }) SourceNS; diff --git a/ui/packages/consul-ui/tests/acceptance/dc/intentions/delete.feature b/ui/packages/consul-ui/tests/acceptance/dc/intentions/delete.feature index 52ca40690..85424e943 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/intentions/delete.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/intentions/delete.feature @@ -4,6 +4,7 @@ Feature: dc / intentions / deleting: Deleting items with confirmations, success Given 1 datacenter model with the value "datacenter" And 1 intention model from yaml --- + SourcePeer: "" SourceName: name DestinationName: destination SourceNS: default diff --git a/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature b/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature index 4b1bd2318..ff35ca22a 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/intentions/navigation.feature @@ -7,6 +7,7 @@ Feature: dc / intentions / navigation - ID: 755b72bd-f5ab-4c92-90cc-bed0e7d8e9f0 Action: allow Meta: ~ + SourcePeer: "" - ID: 755b72bd-f5ab-4c92-90cc-bed0e7d8e9f1 Action: deny Meta: ~ diff --git a/ui/packages/consul-ui/tests/acceptance/dc/services/show/intentions/index.feature b/ui/packages/consul-ui/tests/acceptance/dc/services/show/intentions/index.feature index 05521e23a..16e0b2bc6 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/services/show/intentions/index.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/services/show/intentions/index.feature @@ -21,13 +21,16 @@ Feature: dc / services / show / intentions / index: Intentions per service DestinationNS: default SourcePartition: default DestinationPartition: default + SourcePeer: "" - ID: 755b72bd-f5ab-4c92-90cc-bed0e7d8e9f1 Action: deny Meta: ~ + SourcePeer: "" - ID: 0755b72bd-f5ab-4c92-90cc-bed0e7d8e9f2 Action: deny Meta: ~ + SourcePeer: "" --- When I visit the service page for yaml --- @@ -41,7 +44,7 @@ Feature: dc / services / show / intentions / index: Intentions per service Scenario: I can see intentions And I see 3 intention models on the intentionList component And I click intention on the intentionList.intentions component - Then the url should be /dc1/services/service-0/intentions/peer:billing:default:name:default:default:destination + Then the url should be /dc1/services/service-0/intentions/default:default:name:default:default:destination Scenario: I can delete intentions And I click actions on the intentionList.intentions component And I click delete on the 
intentionList.intentions component From ae04e2f048eb0d1efe32fecb2b6fda37a463dda1 Mon Sep 17 00:00:00 2001 From: cskh Date: Tue, 26 Jul 2022 16:54:53 -0400 Subject: [PATCH 099/107] chore: removed unused method AddService (#13905) - This AddService is not used anywhere. AddServiceWithChecks is place of AddService - Test code is updated --- agent/acl_test.go | 16 ++--- agent/agent_endpoint_test.go | 18 ++--- agent/local/state.go | 13 +--- agent/local/state_test.go | 70 +++++++++---------- .../catalog/config_source_test.go | 2 +- agent/proxycfg-sources/local/sync_test.go | 8 +-- agent/user_event_test.go | 4 +- 7 files changed, 62 insertions(+), 69 deletions(-) diff --git a/agent/acl_test.go b/agent/acl_test.go index 2e8664c9f..79cc5f7b7 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -274,10 +274,10 @@ func TestACL_vetServiceRegister(t *testing.T) { // Try to register over a service without write privs to the existing // service. - a.State.AddService(&structs.NodeService{ + a.State.AddServiceWithChecks(&structs.NodeService{ ID: "my-service", Service: "other", - }, "") + }, nil, "") err = a.vetServiceRegister(serviceRWSecret, &structs.NodeService{ ID: "my-service", Service: "service", @@ -304,10 +304,10 @@ func TestACL_vetServiceUpdateWithAuthorizer(t *testing.T) { require.Contains(t, err.Error(), "Unknown service") // Update with write privs. - a.State.AddService(&structs.NodeService{ + a.State.AddServiceWithChecks(&structs.NodeService{ ID: "my-service", Service: "service", - }, "") + }, nil, "") err = vetServiceUpdate(serviceRWSecret, structs.NewServiceID("my-service", nil)) require.NoError(t, err) @@ -361,10 +361,10 @@ func TestACL_vetCheckRegisterWithAuthorizer(t *testing.T) { // Try to register over a service check without write privs to the // existing service. - a.State.AddService(&structs.NodeService{ + a.State.AddServiceWithChecks(&structs.NodeService{ ID: "my-service", Service: "service", - }, "") + }, nil, "") a.State.AddCheck(&structs.HealthCheck{ CheckID: types.CheckID("my-check"), ServiceID: "my-service", @@ -410,10 +410,10 @@ func TestACL_vetCheckUpdateWithAuthorizer(t *testing.T) { require.Contains(t, err.Error(), "Unknown check") // Update service check with write privs. 
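Since PATCH 099 removes `AddService` outright, callers now register through `AddServiceWithChecks` and pass `nil` when there are no checks to add atomically. A sketch of the migrated call pattern, assembled from the constructors used in the tests in this patch, is below; treat it as a sketch, since it assumes Consul's internal `agent/local`, `agent/structs`, and `agent/token` packages and a `token.Store` built the way the surrounding tests build one.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/local"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/go-hclog"
)

func main() {
	// Build a local state the same way the tests in this patch do.
	state := local.NewState(local.Config{}, hclog.NewNullLogger(), new(token.Store))
	state.TriggerSyncChanges = func() {}

	svc := &structs.NodeService{ID: "web", Service: "web", Port: 8080}

	// AddService is gone; AddServiceWithChecks takes the service, an optional
	// slice of checks to register atomically with it (nil here), and a token.
	if err := state.AddServiceWithChecks(svc, nil, ""); err != nil {
		panic(err)
	}
	fmt.Println(state.ServiceExists(structs.ServiceID{ID: svc.ID}))
}
```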
- a.State.AddService(&structs.NodeService{ + a.State.AddServiceWithChecks(&structs.NodeService{ ID: "my-service", Service: "service", - }, "") + }, nil, "") a.State.AddCheck(&structs.HealthCheck{ CheckID: types.CheckID("my-service-check"), ServiceID: "my-service", diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 7bde62387..270cc7dc1 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -93,7 +93,7 @@ func TestAgent_Services(t *testing.T) { }, Port: 5000, } - require.NoError(t, a.State.AddService(srv1, "")) + require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, "")) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) resp := httptest.NewRecorder() @@ -128,7 +128,7 @@ func TestAgent_ServicesFiltered(t *testing.T) { }, Port: 5000, } - require.NoError(t, a.State.AddService(srv1, "")) + require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, "")) // Add another service srv2 := &structs.NodeService{ @@ -140,7 +140,7 @@ func TestAgent_ServicesFiltered(t *testing.T) { }, Port: 1234, } - require.NoError(t, a.State.AddService(srv2, "")) + require.NoError(t, a.State.AddServiceWithChecks(srv2, nil, "")) req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("foo in Meta"), nil) resp := httptest.NewRecorder() @@ -188,7 +188,7 @@ func TestAgent_Services_ExternalConnectProxy(t *testing.T) { Upstreams: structs.TestUpstreams(t), }, } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) resp := httptest.NewRecorder() @@ -232,7 +232,7 @@ func TestAgent_Services_Sidecar(t *testing.T) { }, }, } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) resp := httptest.NewRecorder() @@ -281,7 +281,7 @@ func TestAgent_Services_MeshGateway(t *testing.T) { }, }, } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) resp := httptest.NewRecorder() @@ -325,7 +325,7 @@ func TestAgent_Services_TerminatingGateway(t *testing.T) { }, }, } - require.NoError(t, a.State.AddService(srv1, "")) + require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, "")) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) resp := httptest.NewRecorder() @@ -370,7 +370,7 @@ func TestAgent_Services_ACLFilter(t *testing.T) { }, } for _, s := range services { - a.State.AddService(s, "") + a.State.AddServiceWithChecks(s, nil, "") } t.Run("no token", func(t *testing.T) { @@ -7994,7 +7994,7 @@ func TestAgent_Services_ExposeConfig(t *testing.T) { }, }, } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) resp := httptest.NewRecorder() diff --git a/agent/local/state.go b/agent/local/state.go index 74641a068..7909982db 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -256,15 +256,6 @@ func (l *State) aclTokenForServiceSync(id structs.ServiceID, fallback func() str return fallback() } -// AddService is used to add a service entry to the local state. 
-// This entry is persistent and the agent will make a best effort to -// ensure it is registered -func (l *State) AddService(service *structs.NodeService, token string) error { - l.Lock() - defer l.Unlock() - return l.addServiceLocked(service, token) -} - func (l *State) addServiceLocked(service *structs.NodeService, token string) error { if service == nil { return fmt.Errorf("no service") @@ -293,7 +284,9 @@ func (l *State) addServiceLocked(service *structs.NodeService, token string) err return nil } -// AddServiceWithChecks adds a service and its check tp the local state atomically +// AddServiceWithChecks adds a service entry and its checks to the local state atomically +// This entry is persistent and the agent will make a best effort to +// ensure it is registered func (l *State) AddServiceWithChecks(service *structs.NodeService, checks []*structs.HealthCheck, token string) error { l.Lock() defer l.Unlock() diff --git a/agent/local/state_test.go b/agent/local/state_test.go index 686c86a93..7aa539ea0 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -64,7 +64,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } assert.False(t, a.State.ServiceExists(structs.ServiceID{ID: srv1.ID})) - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") assert.True(t, a.State.ServiceExists(structs.ServiceID{ID: srv1.ID})) args.Service = srv1 if err := a.RPC("Catalog.Register", args, &out); err != nil { @@ -83,7 +83,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv2, "") + a.State.AddServiceWithChecks(srv2, nil, "") srv2_mod := new(structs.NodeService) *srv2_mod = *srv2 @@ -105,7 +105,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv3, "") + a.State.AddServiceWithChecks(srv3, nil, "") // Exists remote (delete) srv4 := &structs.NodeService{ @@ -137,7 +137,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv5, "") + a.State.AddServiceWithChecks(srv5, nil, "") srv5_mod := new(structs.NodeService) *srv5_mod = *srv5 @@ -290,7 +290,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") require.NoError(t, a.RPC("Catalog.Register", &structs.RegisterRequest{ Datacenter: "dc1", Node: a.Config.NodeName, @@ -311,7 +311,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv2, "") + a.State.AddServiceWithChecks(srv2, nil, "") srv2_mod := clone(srv2) srv2_mod.Port = 9000 @@ -335,7 +335,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv3, "") + a.State.AddServiceWithChecks(srv3, nil, "") // Exists remote (delete) srv4 := &structs.NodeService{ @@ -496,7 +496,7 @@ func TestAgent_ServiceWatchCh(t *testing.T) { Tags: []string{"tag1"}, Port: 6100, } - require.NoError(t, a.State.AddService(srv1, "")) + require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, "")) verifyState := func(ss *local.ServiceState) { 
require.NotNil(t, ss) @@ -518,7 +518,7 @@ func TestAgent_ServiceWatchCh(t *testing.T) { go func() { srv2 := srv1 srv2.Port = 6200 - require.NoError(t, a.State.AddService(srv2, "")) + require.NoError(t, a.State.AddServiceWithChecks(srv2, nil, "")) }() // We should observe WatchCh close @@ -595,7 +595,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") // register a local service with tag override disabled srv2 := &structs.NodeService{ @@ -610,7 +610,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv2, "") + a.State.AddServiceWithChecks(srv2, nil, "") // make sure they are both in the catalog if err := a.State.SyncChanges(); err != nil { @@ -722,7 +722,7 @@ func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) { Tags: []string{"primary"}, Port: 5000, } - a.State.AddService(srv, "") + a.State.AddServiceWithChecks(srv, nil, "") chk := &structs.HealthCheck{ Node: a.Config.NodeName, @@ -772,7 +772,7 @@ func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) { Tags: []string{"primary"}, Port: 5000, } - a.State.AddService(srv, "") + a.State.AddServiceWithChecks(srv, nil, "") chk1 := &structs.HealthCheck{ Node: a.Config.NodeName, @@ -873,7 +873,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv1, token) + a.State.AddServiceWithChecks(srv1, nil, token) // Create service (allowed) srv2 := &structs.NodeService{ @@ -887,7 +887,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv2, token) + a.State.AddServiceWithChecks(srv2, nil, token) if err := a.State.SyncFull(); err != nil { t.Fatalf("err: %v", err) @@ -1332,7 +1332,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv1, "root") + a.State.AddServiceWithChecks(srv1, nil, "root") srv2 := &structs.NodeService{ ID: "api", Service: "api", @@ -1344,7 +1344,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } - a.State.AddService(srv2, "root") + a.State.AddServiceWithChecks(srv2, nil, "root") if err := a.State.SyncFull(); err != nil { t.Fatalf("err: %v", err) @@ -1861,14 +1861,14 @@ func TestState_ServiceTokens(t *testing.T) { }) t.Run("empty string when there is no token", func(t *testing.T) { - err := l.AddService(&structs.NodeService{ID: "redis"}, "") + err := l.AddServiceWithChecks(&structs.NodeService{ID: "redis"}, nil, "") require.NoError(t, err) require.Equal(t, "", l.ServiceToken(id)) }) t.Run("returns configured token", func(t *testing.T) { - err := l.AddService(&structs.NodeService{ID: "redis"}, "abc123") + err := l.AddServiceWithChecks(&structs.NodeService{ID: "redis"}, nil, "abc123") require.NoError(t, err) require.Equal(t, "abc123", l.ServiceToken(id)) @@ -1931,7 +1931,7 @@ func TestAgent_CheckCriticalTime(t *testing.T) { l.TriggerSyncChanges = func() {} svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000} - l.AddService(svc, "") + l.AddServiceWithChecks(svc, nil, "") // Add a passing check and make sure it's not critical. 
checkID := types.CheckID("redis:1") @@ -2017,8 +2017,8 @@ func TestAgent_AliasCheck(t *testing.T) { l.TriggerSyncChanges = func() {} // Add checks - require.NoError(t, l.AddService(&structs.NodeService{Service: "s1"}, "")) - require.NoError(t, l.AddService(&structs.NodeService{Service: "s2"}, "")) + require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s1"}, nil, "")) + require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s2"}, nil, "")) require.NoError(t, l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c1"), ServiceID: "s1"}, "")) require.NoError(t, l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c2"), ServiceID: "s2"}, "")) @@ -2071,7 +2071,7 @@ func TestAgent_AliasCheck_ServiceNotification(t *testing.T) { require.NoError(t, l.AddAliasCheck(structs.NewCheckID(types.CheckID("a1"), nil), structs.NewServiceID("s1", nil), notifyCh)) // Add aliased service, s1, and verify we get notified - require.NoError(t, l.AddService(&structs.NodeService{Service: "s1"}, "")) + require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s1"}, nil, "")) select { case <-notifyCh: default: @@ -2079,7 +2079,7 @@ func TestAgent_AliasCheck_ServiceNotification(t *testing.T) { } // Re-adding same service should not lead to a notification - require.NoError(t, l.AddService(&structs.NodeService{Service: "s1"}, "")) + require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s1"}, nil, "")) select { case <-notifyCh: t.Fatal("notify received") @@ -2087,7 +2087,7 @@ func TestAgent_AliasCheck_ServiceNotification(t *testing.T) { } // Add different service and verify we do not get notified - require.NoError(t, l.AddService(&structs.NodeService{Service: "s2"}, "")) + require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s2"}, nil, "")) select { case <-notifyCh: t.Fatal("notify received") @@ -2189,10 +2189,10 @@ func TestState_RemoveServiceErrorMessages(t *testing.T) { state.TriggerSyncChanges = func() {} // Add 1 service - err := state.AddService(&structs.NodeService{ + err := state.AddServiceWithChecks(&structs.NodeService{ ID: "web-id", Service: "web-name", - }, "") + }, nil, "") require.NoError(t, err) // Attempt to remove service that doesn't exist @@ -2230,9 +2230,9 @@ func TestState_Notify(t *testing.T) { drainCh(notifyCh) // Add a service - err := state.AddService(&structs.NodeService{ + err := state.AddServiceWithChecks(&structs.NodeService{ Service: "web", - }, "fake-token-web") + }, nil, "fake-token-web") require.NoError(t, err) // Should have a notification @@ -2240,10 +2240,10 @@ func TestState_Notify(t *testing.T) { drainCh(notifyCh) // Re-Add same service - err = state.AddService(&structs.NodeService{ + err = state.AddServiceWithChecks(&structs.NodeService{ Service: "web", Port: 4444, - }, "fake-token-web") + }, nil, "fake-token-web") require.NoError(t, err) // Should have a notification @@ -2261,9 +2261,9 @@ func TestState_Notify(t *testing.T) { state.StopNotify(notifyCh) // Add a service - err = state.AddService(&structs.NodeService{ + err = state.AddServiceWithChecks(&structs.NodeService{ Service: "web", - }, "fake-token-web") + }, nil, "fake-token-web") require.NoError(t, err) // Should NOT have a notification @@ -2293,7 +2293,7 @@ func TestAliasNotifications_local(t *testing.T) { Address: "127.0.0.10", Port: 8080, } - a.State.AddService(srv, "") + a.State.AddServiceWithChecks(srv, nil, "") scID := "socat-sidecar-proxy" sc := &structs.NodeService{ @@ -2303,7 +2303,7 @@ func TestAliasNotifications_local(t 
*testing.T) { Address: "127.0.0.10", Port: 9090, } - a.State.AddService(sc, "") + a.State.AddServiceWithChecks(sc, nil, "") tcpID := types.CheckID("service:socat-tcp") chk0 := &structs.HealthCheck{ diff --git a/agent/proxycfg-sources/catalog/config_source_test.go b/agent/proxycfg-sources/catalog/config_source_test.go index dffb0c2e5..4df59a7d3 100644 --- a/agent/proxycfg-sources/catalog/config_source_test.go +++ b/agent/proxycfg-sources/catalog/config_source_test.go @@ -116,7 +116,7 @@ func TestConfigSource_LocallyManagedService(t *testing.T) { token := "token" localState := testLocalState(t) - localState.AddService(&structs.NodeService{ID: serviceID.ID}, "") + localState.AddServiceWithChecks(&structs.NodeService{ID: serviceID.ID}, nil, "") localWatcher := NewMockWatcher(t) localWatcher.On("Watch", serviceID, nodeName, token). diff --git a/agent/proxycfg-sources/local/sync_test.go b/agent/proxycfg-sources/local/sync_test.go index b73c0e3b3..62b4e8db7 100644 --- a/agent/proxycfg-sources/local/sync_test.go +++ b/agent/proxycfg-sources/local/sync_test.go @@ -29,10 +29,10 @@ func TestSync(t *testing.T) { state := local.NewState(local.Config{}, hclog.NewNullLogger(), tokens) state.TriggerSyncChanges = func() {} - state.AddService(&structs.NodeService{ + state.AddServiceWithChecks(&structs.NodeService{ ID: serviceID, Kind: structs.ServiceKindConnectProxy, - }, serviceToken) + }, nil, serviceToken) cfgMgr := NewMockConfigManager(t) @@ -96,10 +96,10 @@ func TestSync(t *testing.T) { Return([]proxycfg.ProxyID{}). Maybe() - state.AddService(&structs.NodeService{ + state.AddServiceWithChecks(&structs.NodeService{ ID: serviceID, Kind: structs.ServiceKindConnectProxy, - }, "") + }, nil, "") select { case reg := <-registerCh: diff --git a/agent/user_event_test.go b/agent/user_event_test.go index 3f391ba2f..8cae94dde 100644 --- a/agent/user_event_test.go +++ b/agent/user_event_test.go @@ -64,7 +64,7 @@ func TestShouldProcessUserEvent(t *testing.T) { Tags: []string{"test", "foo", "bar", "primary"}, Port: 5000, } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") p := &UserEvent{} if !a.shouldProcessUserEvent(p) { @@ -172,7 +172,7 @@ func TestFireReceiveEvent(t *testing.T) { Tags: []string{"test", "foo", "bar", "primary"}, Port: 5000, } - a.State.AddService(srv1, "") + a.State.AddServiceWithChecks(srv1, nil, "") p1 := &UserEvent{Name: "deploy", ServiceFilter: "web"} err := a.UserEvent("dc1", "root", p1) From 9bd8c0cd6d955644b51ac6f5cfc0946b50a206c3 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Tue, 26 Jul 2022 17:54:51 -0600 Subject: [PATCH 100/107] docs: update helm reference docs (#13910) --- website/content/docs/k8s/helm.mdx | 37 +++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index 83d6d73bf..14ffbb6dd 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -62,6 +62,7 @@ Use these links to navigate to a particular top-level stanza. Consul into Kubernetes will have, e.g. `service-name.service.consul`. - `peering` ((#v-global-peering)) - [Experimental] Configures the Cluster Peering feature. Requires Consul v1.13+ and Consul-K8s v0.45+. + - `enabled` ((#v-global-peering-enabled)) (`boolean: false`) - If true, the Helm chart enables Cluster Peering for the cluster. This option enables peering controllers and allows use of the PeeringAcceptor and PeeringDialer CRDs for establishing service mesh peerings. 
@@ -486,7 +487,8 @@ Use these links to navigate to a particular top-level stanza. `-federation` (if setting `global.name`), otherwise `-consul-federation`. - - `primaryDatacenter` ((#v-global-federation-primarydatacenter)) (`string: null`) - The name of the primary datacenter. + - `primaryDatacenter` ((#v-global-federation-primarydatacenter)) (`string: null`) - The name of the primary datacenter. This should only be set for datacenters + that are not the primary datacenter. - `primaryGateways` ((#v-global-federation-primarygateways)) (`array: []`) - A list of addresses of the primary mesh gateways in the form `:`. (e.g. ["1.1.1.1:443", "2.3.4.5:443"] @@ -663,7 +665,7 @@ Use these links to navigate to a particular top-level stanza. - `resources` ((#v-server-resources)) (`map`) - The resource requests (CPU, memory, etc.) for each of the server agents. This should be a YAML map corresponding to a Kubernetes - ResourceRequirements (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#resourcerequirements-v1-core) + ResourceRequirements (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcerequirements-v1-core) object. NOTE: The use of a YAML string is deprecated. Example: @@ -1475,6 +1477,15 @@ Use these links to navigate to a particular top-level stanza. anotherLabelKey: another-label-value ``` + - `annotations` ((#v-synccatalog-annotations)) (`string: null`) - This value defines additional annotations for + the catalog sync pods. This should be formatted as a multi-line string. + + ```yaml + annotations: | + "sample/annotation1": "foo" + "sample/annotation2": "bar" + ``` + ### connectInject ((#h-connectinject)) - `connectInject` ((#v-connectinject)) - Configures the automatic Connect sidecar injector. @@ -1506,6 +1517,19 @@ Use these links to navigate to a particular top-level stanza. This value is also overridable via the "consul.hashicorp.com/transparent-proxy-overwrite-probes" annotation. Note: This value has no effect if transparent proxy is disabled on the pod. + - `disruptionBudget` ((#v-connectinject-disruptionbudget)) - This configures the PodDisruptionBudget (https://kubernetes.io/docs/tasks/run-application/configure-pdb/) + for the service mesh sidecar injector. + + - `enabled` ((#v-connectinject-disruptionbudget-enabled)) (`boolean: true`) - This will enable/disable registering a PodDisruptionBudget for the + service mesh sidecar injector. If this is enabled, it will only register the budget so long as + the service mesh is enabled. + + - `maxUnavailable` ((#v-connectinject-disruptionbudget-maxunavailable)) (`integer: null`) - The maximum number of unavailable pods. By default, this will be + automatically computed based on the `connectInject.replicas` value to be `(n/2)-1`. + If you need to set this to `0`, you will need to add a + --set 'connectInject.disruptionBudget.maxUnavailable=0'` flag to the helm chart installation + command because of a limitation in the Helm templating language. + - `metrics` ((#v-connectinject-metrics)) - Configures metrics for Consul Connect services. All values are overridable via annotations on a per-pod basis. @@ -1549,6 +1573,15 @@ Use these links to navigate to a particular top-level stanza. - `priorityClassName` ((#v-connectinject-priorityclassname)) (`string: ""`) - Optional priorityClassName. + - `annotations` ((#v-connectinject-annotations)) (`string: null`) - This value defines additional annotations for + connect inject pods. This should be formatted as a multi-line string. 
+ + ```yaml + annotations: | + "sample/annotation1": "foo" + "sample/annotation2": "bar" + ``` + - `imageConsul` ((#v-connectinject-imageconsul)) (`string: null`) - The Docker image for Consul to use when performing Connect injection. Defaults to global.image. From d4a0dde9fd5e595fd72433b3575e098f62a7609a Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Tue, 26 Jul 2022 22:18:49 -0700 Subject: [PATCH 101/107] docs: remove comparative info from ref docs site --- website/content/docs/intro/index.mdx | 6 +- website/content/docs/intro/vs/chef-puppet.mdx | 45 ----------- website/content/docs/intro/vs/custom.mdx | 35 -------- website/content/docs/intro/vs/eureka.mdx | 54 ------------- website/content/docs/intro/vs/index.mdx | 22 ----- website/content/docs/intro/vs/istio.mdx | 80 ------------------- website/content/docs/intro/vs/nagios.mdx | 43 ---------- website/content/docs/intro/vs/proxies.mdx | 55 ------------- website/content/docs/intro/vs/serf.mdx | 54 ------------- website/content/docs/intro/vs/skydns.mdx | 45 ----------- website/content/docs/intro/vs/smartstack.mdx | 64 --------------- website/data/docs-nav-data.json | 45 ----------- website/redirects.js | 46 ----------- 13 files changed, 2 insertions(+), 592 deletions(-) delete mode 100644 website/content/docs/intro/vs/chef-puppet.mdx delete mode 100644 website/content/docs/intro/vs/custom.mdx delete mode 100644 website/content/docs/intro/vs/eureka.mdx delete mode 100644 website/content/docs/intro/vs/index.mdx delete mode 100644 website/content/docs/intro/vs/istio.mdx delete mode 100644 website/content/docs/intro/vs/nagios.mdx delete mode 100644 website/content/docs/intro/vs/proxies.mdx delete mode 100644 website/content/docs/intro/vs/serf.mdx delete mode 100644 website/content/docs/intro/vs/skydns.mdx delete mode 100644 website/content/docs/intro/vs/smartstack.mdx diff --git a/website/content/docs/intro/index.mdx b/website/content/docs/intro/index.mdx index 6dd5f6fe9..3a14541bd 100644 --- a/website/content/docs/intro/index.mdx +++ b/website/content/docs/intro/index.mdx @@ -115,7 +115,5 @@ forward the request to the remote datacenter and return the result. ## Next Steps -- See [how Consul compares to other software](/docs/intro/vs) to assess how it fits into your - existing infrastructure. -- Continue onwards with [HashiCorp Learn](https://learn.hashicorp.com/tutorials/consul/get-started-install) - to learn more about Consul and how to get Consul up and running. +Continue onwards with [HashiCorp Learn](https://learn.hashicorp.com/tutorials/consul/get-started-install) +to learn more about Consul and how to get Consul up and running. diff --git a/website/content/docs/intro/vs/chef-puppet.mdx b/website/content/docs/intro/vs/chef-puppet.mdx deleted file mode 100644 index 744e96861..000000000 --- a/website/content/docs/intro/vs/chef-puppet.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -layout: docs -page_title: 'Consul vs. Chef, Puppet, etc.' -description: >- - It is not uncommon to find people using Chef, Puppet, and other configuration - management tools to build service discovery mechanisms. This is usually done - by querying global state to construct configuration files on each node during - a periodic convergence run. ---- - -# Consul vs. Chef, Puppet, etc. - -It is not uncommon to find people using Chef, Puppet, and other configuration -management tools to build service discovery mechanisms. This is usually -done by querying global state to construct configuration files on each -node during a periodic convergence run. 
- -Unfortunately, this approach has -a number of pitfalls. The configuration information is static -and cannot update any more frequently than convergence runs. Generally this -is on the interval of many minutes or hours. Additionally, there is no -mechanism to incorporate the system state in the configuration: nodes which -are unhealthy may receive traffic exacerbating issues further. Using this -approach also makes supporting multiple datacenters challenging as a central -group of servers must manage all datacenters. - -Consul is designed specifically as a service discovery tool. As such, -it is much more dynamic and responsive to the state of the cluster. Nodes -can register and deregister the services they provide, enabling dependent -applications and services to rapidly discover all providers. By using the -integrated health checking, Consul can route traffic away from unhealthy -nodes, allowing systems and services to gracefully recover. Static configuration -that may be provided by configuration management tools can be moved into the -dynamic key/value store. This allows application configuration to be updated -without a slow convergence run. Lastly, because each datacenter runs independently, -supporting multiple datacenters is no different than a single datacenter. - -That said, Consul is not a replacement for configuration management tools. -These tools are still critical to set up applications, including Consul itself. -Static provisioning is best managed by existing tools while dynamic state and -discovery is better managed by Consul. The separation of configuration management -and cluster management also has a number of advantageous side effects: Chef recipes -and Puppet manifests become simpler without global state, periodic runs are no longer -required for service or configuration changes, and the infrastructure can become -immutable since config management runs require no global state. diff --git a/website/content/docs/intro/vs/custom.mdx b/website/content/docs/intro/vs/custom.mdx deleted file mode 100644 index 0cb0cc51d..000000000 --- a/website/content/docs/intro/vs/custom.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -layout: docs -page_title: Consul vs. Custom Solutions -description: >- - As a codebase grows, a monolithic app often evolves into a Service Oriented - Architecture (SOA). A universal pain point for SOA is service discovery and - configuration. In many cases, this leads to organizations building home grown - solutions. It is an undisputed fact that distributed systems are hard; - building one is error-prone and time-consuming. Most systems cut corners by - introducing single points of failure such as a single Redis or RDBMS to - maintain cluster state. These solutions may work in the short term, but they - are rarely fault tolerant or scalable. Besides these limitations, they require - time and resources to build and maintain. ---- - -# Consul vs. Custom Solutions - -As a codebase grows, a monolithic app often evolves into a Service Oriented -Architecture (SOA). A universal pain point for SOA is service discovery and -configuration. In many cases, this leads to organizations building home grown -solutions. It is an undisputed fact that distributed systems are hard; building -one is error-prone and time-consuming. Most systems cut corners by introducing -single points of failure such as a single Redis or RDBMS to maintain cluster -state. These solutions may work in the short term, but they are rarely fault -tolerant or scalable. 
Besides these limitations, they require time and resources -to build and maintain. - -Consul provides the core set of features needed by an SOA out of the box. By -using Consul, organizations can leverage open source work to reduce the time -and effort spent re-inventing the wheel and can focus instead on their business -applications. - -Consul is built on well-cited research and is designed with the constraints of -distributed systems in mind. At every step, Consul takes efforts to provide a -robust and scalable solution for organizations of any size. diff --git a/website/content/docs/intro/vs/eureka.mdx b/website/content/docs/intro/vs/eureka.mdx deleted file mode 100644 index f64b4c4e7..000000000 --- a/website/content/docs/intro/vs/eureka.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -layout: docs -page_title: Consul vs. Eureka -description: >- - Eureka is a service discovery tool that provides a best effort registry and - discovery service. It uses central servers and clients which are typically - natively integrated with SDKs. Consul provides a super set of features, such - as health checking, key/value storage, ACLs, and multi-datacenter awareness. ---- - -# Consul vs. Eureka - -Eureka is a service discovery tool. The architecture is primarily client/server, -with a set of Eureka servers per datacenter, usually one per availability zone. -Typically clients of Eureka use an embedded SDK to register and discover services. -For clients that are not natively integrated, a sidecar such as Ribbon is used -to transparently discover services via Eureka. - -Eureka provides a weakly consistent view of services, using best effort replication. -When a client registers with a server, that server will make an attempt to replicate -to the other servers but provides no guarantee. Service registrations have a short -Time-To-Live (TTL), requiring clients to heartbeat with the servers. Unhealthy services -or nodes will stop heartbeating, causing them to timeout and be removed from the registry. -Discovery requests can route to any service, which can serve stale or missing data due to -the best effort replication. This simplified model allows for easy cluster administration -and high scalability. - -Consul provides a super set of features, including richer health checking, key/value store, -and multi-datacenter awareness. Consul requires a set of servers in each datacenter, along -with an agent on each client, similar to using a sidecar like Ribbon. The Consul agent allows -most applications to be Consul unaware, performing the service registration via configuration -files and discovery via DNS or load balancer sidecars. - -Consul provides a strong consistency guarantee, since servers replicate state using the -[Raft protocol](/docs/architecture/consensus). Consul supports a rich set of health checks -including TCP, HTTP, Nagios/Sensu compatible scripts, or TTL based like Eureka. Client nodes -participate in a [gossip based health check](/docs/architecture/gossip), which distributes -the work of health checking, unlike centralized heartbeating which becomes a scalability challenge. -Discovery requests are routed to the elected Consul leader which allows them to be strongly consistent -by default. Clients that allow for stale reads enable any server to process their request allowing -for linear scalability like Eureka. - -The strongly consistent nature of Consul means it can be used as a locking service for leader -elections and cluster coordination. 
Eureka does not provide similar guarantees, and typically -requires running ZooKeeper for services that need to perform coordination or have stronger -consistency needs. - -Consul provides a toolkit of features needed to support a service oriented architecture. -This includes service discovery, but also rich health checking, locking, Key/Value, multi-datacenter -federation, an event system, and ACLs. Both Consul and the ecosystem of tools like consul-template -and envconsul try to minimize application changes required to integration, to avoid needing -native integration via SDKs. Eureka is part of a larger Netflix OSS suite, which expects applications -to be relatively homogeneous and tightly integrated. As a result, Eureka only solves a limited -subset of problems, expecting other tools such as ZooKeeper to be used alongside. diff --git a/website/content/docs/intro/vs/index.mdx b/website/content/docs/intro/vs/index.mdx deleted file mode 100644 index efc9952e3..000000000 --- a/website/content/docs/intro/vs/index.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -layout: docs -page_title: Consul vs. Other Software -description: >- - The problems Consul solves are varied, but each individual feature has been - solved by many different systems. Although there is no single system that - provides all the features of Consul, there are other options available to - solve some of these problems. ---- - -# Consul vs. Other Software - -The problems Consul solves are varied, but each individual feature has been -solved by many different systems. Although there is no single system that -provides all the features of Consul, there are other options available to solve -some of these problems. - -In this section, we compare Consul to some other options. In most cases, Consul -is not mutually exclusive with any other system. - -Use the navigation to the left to read the comparison of Consul to specific -systems. diff --git a/website/content/docs/intro/vs/istio.mdx b/website/content/docs/intro/vs/istio.mdx deleted file mode 100644 index 19715e36a..000000000 --- a/website/content/docs/intro/vs/istio.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -layout: docs -page_title: Consul vs. Istio -description: >- - Istio is a platform for connecting and securing microservices. This page - describes the similarities and differences between Istio and Consul. ---- - -# Consul vs. Istio - -Istio is an open platform to connect, manage, and secure microservices. - -To enable the full functionality of Istio, multiple services must -be deployed. For the control plane: Pilot, Mixer, and Citadel must be -deployed and for the data plane an Envoy sidecar is deployed. Additionally, -Istio requires a 3rd party service catalog from Kubernetes, Consul, Eureka, -or others. Finally, Istio requires an external system for storing state, -typically etcd. At a minimum, three Istio-dedicated services along with at -least one separate distributed system (in addition to Istio) must be -configured to use the full functionality of Istio. - -Istio provides layer 7 features for path-based routing, traffic shaping, -load balancing, and telemetry. Access control policies can be configured -targeting both layer 7 and layer 4 properties to control access, routing, -and more based on service identity. - -Consul is a single binary providing both server and client capabilities, and -includes all functionality for service catalog, configuration, TLS certificates, -authorization, and more. 
No additional systems need to be installed to use -Consul, although Consul optionally supports external systems such as Vault -to augment behavior. This architecture enables Consul to be easily installed -on any platform, including directly onto the machine. - -Consul uses an agent-based model where each node in the cluster runs a -Consul Client. This client maintains a local cache that is efficiently updated -from servers. As a result, all secure service communication APIs respond in -microseconds and do not require any external communication. This allows us to -do connection enforcement at the edge without communicating to central -servers. Istio flows requests to a central Mixer service and must push -updates out via Pilot. This dramatically reduces the scalability of Istio, -whereas Consul is able to efficiently distribute updates and perform all -work on the edge. - -Consul provides layer 7 features for path-based routing, traffic shifting, -load balancing, and telemetry. Consul enforces authorization and identity to -layer 4 only — either the TLS connection can be established or it can't. -We believe service identity should be tied to layer 4, whereas layer 7 should be -used for routing, telemetry, etc. We will be adding more layer 7 features to Consul in the future. - -The data plane for Consul is pluggable. It includes a built-in proxy with -a larger performance trade off for ease of use. But you may also use third -party proxies such as Envoy to leverage layer 7 features. The ability to use the -right proxy for the job allows flexible heterogeneous deployments where -different proxies may be more correct for the applications they're proxying. We -encourage users leverage the pluggable data plane layer and use a proxy which -supports the layer 7 features necessary for the cluster. - -In addition to third party proxy support, applications can natively integrate -with the Connect protocol. As a result, the performance overhead of introducing -Connect is negligible. These "Connect-native" applications can interact with -any other Connect-capable services, whether they're using a proxy or are -also Connect-native. - -Consul implements automatic TLS certificate management complete with rotation -support. Both leaf and root certificates can be rotated automatically across -a large Consul cluster with zero disruption to connections. The certificate -management system is pluggable through code change in Consul and will be -exposed as an external plugin system shortly. This enables Consul to work -with any PKI solution. - -Because Consul's service connection feature "Connect" is built-in, it -inherits the operational stability of Consul. Consul has been in production -for large companies since 2014 and is known to be deployed on as many as -50,000 nodes in a single cluster. - -This comparison is based on our own limited usage of Istio as well as -talking to Istio users. If you feel there are inaccurate statements in this -comparison, please click "Edit This Page" in the footer of this page and -propose edits. We strive for technical accuracy and will review and update -this post for inaccuracies as quickly as possible. diff --git a/website/content/docs/intro/vs/nagios.mdx b/website/content/docs/intro/vs/nagios.mdx deleted file mode 100644 index c1f9aee8d..000000000 --- a/website/content/docs/intro/vs/nagios.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -layout: docs -page_title: 'Consul vs. Nagios' -description: >- - Nagios is a tool built for monitoring. 
It is used to quickly - notify operators when an issue occurs. ---- - -# Consul vs. Nagios - -Nagios is a tool built for monitoring. It is used to quickly notify -operators when an issue occurs. - -Nagios uses a group of central servers that are configured to perform -checks on remote hosts. This design makes it difficult to scale Nagios, -as large fleets quickly reach the limit of vertical scaling, and Nagios -does not easily scale horizontally. Nagios is also notoriously -difficult to use with modern DevOps and configuration management tools, -as local configurations must be updated when remote servers are added -or removed. - -Consul provides the same health checking abilities as Nagios, -is friendly to modern DevOps, and avoids the inherent scaling issues. -Consul runs all checks locally, avoiding placing a burden on central servers. -The status of checks is maintained by the Consul servers, which are fault -tolerant and have no single point of failure. Lastly, Consul can scale to -vastly more checks because it relies on edge-triggered updates. This means -that an update is only triggered when a check transitions from "passing" -to "failing" or vice versa. - -In a large fleet, the majority of checks are passing, and even the minority -that are failing are persistent. By capturing changes only, Consul reduces -the amount of networking and compute resources used by the health checks, -allowing the system to be much more scalable. - -An astute reader may notice that if a Consul agent dies, then no edge triggered -updates will occur. From the perspective of other nodes, all checks will appear -to be in a steady state. However, Consul guards against this as well. The -[gossip protocol](/docs/architecture/gossip) used between clients and servers -integrates a distributed failure detector. This means that if a Consul agent fails, -the failure will be detected, and thus all checks being run by that node can be -assumed failed. This failure detector distributes the work among the entire cluster -while, most importantly, enabling the edge triggered architecture to work. diff --git a/website/content/docs/intro/vs/proxies.mdx b/website/content/docs/intro/vs/proxies.mdx deleted file mode 100644 index d4df15ab8..000000000 --- a/website/content/docs/intro/vs/proxies.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -layout: docs -page_title: Consul vs. Envoy and Other Proxies -description: >- - Modern service proxies provide high-level service routing, authentication, - telemetry, and more for microservice and cloud environments. Envoy is a - popular and feature rich proxy. This page describes how Consul relates to - proxies such as Envoy. ---- - -# Consul vs. Envoy and Other Proxies - -Modern service proxies provide high-level service routing, authentication, -telemetry, and more for microservice and cloud environments. Envoy is -a popular and feature-rich proxy that is often -used on its own. Consul [integrates with Envoy](/docs/connect/proxies/envoy) to simplify its configuration. - -Proxies require a rich set of configuration to operate since backend -addresses, frontend listeners, routes, filters, telemetry shipping, and -more must all be configured. Further, a modern infrastructure contains -many proxies, often one proxy per service as proxies are deployed in -a "sidecar" model next to a service. Therefore, a primary challenge of -proxies is the configuration sprawl and orchestration. - -Proxies form what is referred to as the "data plane": the pathway which -data travels for network connections. 
Above this is the "control plane" -which provides the rules and configuration for the data plane. Proxies -typically integrate with outside solutions to provide the control plane. -For example, Envoy integrates with Consul to dynamically populate -service backend addresses. - -Consul is a control plane solution. The service catalog serves as a registry -for services and their addresses and can be used to route traffic for proxies. -The Connect feature of Consul provides the TLS certificates and service -access graph, but still requires a proxy to exist in the data path. As a -control plane, Consul integrates with many data plane solutions including -Envoy, HAProxy, Nginx, and more. - -The [Consul Envoy integration](/docs/connect/proxies/envoy) -is currently the primary way to utilize advanced layer 7 features provided -by Consul. In addition to Envoy, Consul enables -third party proxies to integrate with Connect and provide the data -plane with Consul operating as the control plane. - -Proxies provide excellent solutions to layer 7 concerns such as path-based -routing, tracing and telemetry, and more. By supporting a pluggable data plane model, the right proxy can be -deployed as needed. -For performance-critical applications or those -that utilize layer 7 functionality, Envoy can be used. For non-performance critical layer 4 applications, you can use Consul's [built-in proxy](/docs/connect/proxies/built-in) for convenience. - -For some applications that may require hardware, a hardware load balancer -such as an F5 appliance may be deployed. Consul encourages this use of the right -proxy for the scenario and treats hardware load balancers as swappable components that can be run -alongside other proxies, assuming they integrate with the [necessary APIs](/docs/connect/proxies/integrate) -for Connect. diff --git a/website/content/docs/intro/vs/serf.mdx b/website/content/docs/intro/vs/serf.mdx deleted file mode 100644 index 437b76e10..000000000 --- a/website/content/docs/intro/vs/serf.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -layout: docs -page_title: Consul vs. Serf -description: >- - Serf is a node discovery and orchestration tool and is the only tool discussed - so far that is built on an eventually-consistent gossip model with no - centralized servers. It provides a number of features, including group - membership, failure detection, event broadcasts, and a query mechanism. - However, Serf does not provide any high-level features such as service - discovery, health checking or key/value storage. Consul is a complete system - providing all of those features. ---- - -# Consul vs. Serf - -[Serf](https://www.serf.io) is a node discovery and orchestration tool and is the only -tool discussed so far that is built on an eventually-consistent gossip model -with no centralized servers. It provides a number of features, including group -membership, failure detection, event broadcasts, and a query mechanism. However, -Serf does not provide any high-level features such as service discovery, health -checking or key/value storage. Consul is a complete system providing all of those -features. - -The internal [gossip protocol](/docs/architecture/gossip) used within Consul is in -fact powered by the Serf library: Consul leverages the membership and failure detection -features and builds upon them to add service discovery. By contrast, the discovery -feature of Serf is at a node level, while Consul provides a service and node level -abstraction. 
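The node-level nature of Serf's discovery is easiest to see by using the hashicorp/serf Go library directly. The following is a minimal, illustrative sketch (the node name and join address are invented for the example); membership is reported per node, with no concept of a service:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/serf/serf"
)

func main() {
	// Node-level membership: every member is a node, nothing more.
	conf := serf.DefaultConfig()
	conf.NodeName = "node-1" // invented name for this sketch

	cluster, err := serf.Create(conf)
	if err != nil {
		log.Fatal(err)
	}
	defer cluster.Leave()

	// Join an existing member; the address is invented for this sketch.
	if _, err := cluster.Join([]string{"10.0.0.2"}, false); err != nil {
		log.Printf("join failed: %v", err)
	}

	// Serf can answer "which nodes are alive?", but not
	// "which nodes provide service X?".
	for _, m := range cluster.Members() {
		fmt.Printf("%s %s %s\n", m.Name, m.Addr, m.Status)
	}
}
```

Consul layers its service and health abstractions on top of exactly this membership primitive.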
- -The health checking provided by Serf is very low level and only indicates if the -agent is alive. Consul extends this to provide a rich health checking system -that handles liveness in addition to arbitrary host and service-level checks. -Health checks are integrated with a central catalog that operators can easily -query to gain insight into the cluster. - -The membership provided by Serf is at a node level, while Consul focuses -on the service level abstraction, mapping single nodes to multiple services. -This can be simulated in Serf using tags, but it is much more limited and does -not provide useful query interfaces. Consul also makes use of a strongly-consistent -catalog while Serf is only eventually-consistent. - -In addition to the service level abstraction and improved health checking, -Consul provides a key/value store and support for multiple datacenters. -Serf can run across the WAN but with degraded performance. Consul makes use -of [multiple gossip pools](/docs/architecture) so that -the performance of Serf over a LAN can be retained while still using it over -a WAN for linking together multiple datacenters. - -Consul is opinionated in its usage while Serf is a more flexible and -general purpose tool. In [CAP](https://en.wikipedia.org/wiki/CAP_theorem) terms, -Consul uses a CP architecture, favoring consistency over availability. Serf is an -AP system and sacrifices consistency for availability. This means Consul cannot -operate if the central servers cannot form a quorum while Serf will continue to -function under almost all circumstances. diff --git a/website/content/docs/intro/vs/skydns.mdx b/website/content/docs/intro/vs/skydns.mdx deleted file mode 100644 index 3db400076..000000000 --- a/website/content/docs/intro/vs/skydns.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -layout: docs -page_title: Consul vs. SkyDNS -description: >- - SkyDNS is a tool designed to provide service discovery. It uses multiple - central servers that are strongly-consistent and fault-tolerant. Nodes - register services using an HTTP API, and queries can be made over HTTP or DNS - to perform discovery. ---- - -# Consul vs. SkyDNS - -SkyDNS is a tool designed to provide service discovery. -It uses multiple central servers that are strongly-consistent and -fault-tolerant. Nodes register services using an HTTP API, and -queries can be made over HTTP or DNS to perform discovery. - -Consul is very similar but provides a superset of features. Consul -also relies on multiple central servers to provide strong consistency -and fault tolerance. Nodes can use an HTTP API or use an agent to -register services, and queries are made over HTTP or DNS. - -However, the systems differ in many ways. Consul provides a much richer -health checking framework with support for arbitrary checks and -a highly scalable failure detection scheme. SkyDNS relies on naive -heartbeating and TTLs, an approach which has known scalability issues. -Additionally, the heartbeat only provides a limited liveness check -versus the rich health checks that Consul performs. - -Multiple datacenters can be supported by using "regions" in SkyDNS; -however, the data is managed and queried from a single cluster. If servers -are split between datacenters, the replication protocol will suffer from -very long commit times. If all the SkyDNS servers are in a central datacenter, -then connectivity issues can cause entire datacenters to lose availability. 
-Additionally, even without a connectivity issue, query performance will -suffer as requests must always be performed in a remote datacenter. - -Consul supports multiple datacenters out of the box, and it purposely -scopes the managed data to be per-datacenter. This means each datacenter -runs an independent cluster of servers. Requests are forwarded to remote -datacenters if necessary; requests for services within a datacenter -never go over the WAN, and connectivity issues between datacenters do not -affect availability within a datacenter. Additionally, the unavailability -of one datacenter does not affect the discovery of services -in any other datacenter. diff --git a/website/content/docs/intro/vs/smartstack.mdx b/website/content/docs/intro/vs/smartstack.mdx deleted file mode 100644 index 668841e61..000000000 --- a/website/content/docs/intro/vs/smartstack.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -layout: docs -page_title: Consul vs. SmartStack -description: >- - SmartStack is a tool which tackles the service discovery problem. It has a - rather unique architecture and has 4 major components: ZooKeeper, HAProxy, - Synapse, and Nerve. The ZooKeeper servers are responsible for storing cluster - state in a consistent and fault-tolerant manner. Each node in the SmartStack - cluster then runs both Nerves and Synapses. The Nerve is responsible for - running health checks against a service and registering with the ZooKeeper - servers. Synapse queries ZooKeeper for service providers and dynamically - configures HAProxy. Finally, clients speak to HAProxy, which does health - checking and load balancing across service providers. ---- - -# Consul vs. SmartStack - -SmartStack is a tool which tackles the service discovery problem. It has a rather -unique architecture and has 4 major components: ZooKeeper, HAProxy, Synapse, and Nerve. -The ZooKeeper servers are responsible for storing cluster state in a consistent and -fault-tolerant manner. Each node in the SmartStack cluster then runs both Nerves and -Synapses. The Nerve is responsible for running health checks against a service and -registering with the ZooKeeper servers. Synapse queries ZooKeeper for service providers -and dynamically configures HAProxy. Finally, clients speak to HAProxy, which does -health checking and load balancing across service providers. - -Consul is a much simpler and more contained system as it does not rely on any external -components. Consul uses an integrated [gossip protocol](/docs/architecture/gossip) -to track all nodes and perform server discovery. This means that server addresses -do not need to be hardcoded and updated fleet-wide on changes, unlike SmartStack. - -Service registration for both Consul and Nerves can be done with a configuration file, -but Consul also supports an API to dynamically change the services and checks that are -in use. - -For discovery, SmartStack clients must use HAProxy, requiring that Synapse be -configured with all desired endpoints in advance. Consul clients instead -use the DNS or HTTP APIs without any configuration needed in advance. Consul -also provides a "tag" abstraction, allowing services to provide metadata such -as versions, primary/secondary designations, or opaque labels that can be used for -filtering. Clients can then request only the service providers which have -matching tags. - -The systems also differ in how they manage health checking. Nerve performs local health -checks in a manner similar to Consul agents. 
However, Consul maintains separate catalog -and health systems. This division allows operators to see which nodes are in each service -pool and provides insight into failing checks. Nerve simply deregisters nodes on failed -checks, providing limited operational insight. Synapse also configures HAProxy to perform -additional health checks. This causes all potential service clients to check for -liveness. With large fleets, this N-to-N style health checking may be prohibitively -expensive. - -Consul generally provides a much richer health checking system. Consul supports -Nagios-style plugins, enabling a vast catalog of checks to be used. Consul allows for -both service- and host-level checks. There is even a "dead man's switch" check that allows -applications to easily integrate custom health checks. Finally, all of this is integrated -into a Health and Catalog system with APIs enabling operators to gain insight into the -broader system. - -In addition to the service discovery and health checking, Consul also provides -an integrated key/value store for configuration and multi-datacenter support. -While it may be possible to configure SmartStack for multiple datacenters, -the central ZooKeeper cluster would be a serious impediment to a fault-tolerant -deployment. diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 8d975e427..ae5624a0d 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -19,51 +19,6 @@ "path": "intro/usecases/what-is-a-service-mesh" } ] - }, - { - "title": "Consul vs. Other Software", - "routes": [ - { - "title": "Overview", - "path": "intro/vs" - }, - { - "title": "Chef, Puppet, etc.", - "path": "intro/vs/chef-puppet" - }, - { - "title": "Nagios", - "path": "intro/vs/nagios" - }, - { - "title": "SkyDNS", - "path": "intro/vs/skydns" - }, - { - "title": "SmartStack", - "path": "intro/vs/smartstack" - }, - { - "title": "Serf", - "path": "intro/vs/serf" - }, - { - "title": "Eureka", - "path": "intro/vs/eureka" - }, - { - "title": "Istio", - "path": "intro/vs/istio" - }, - { - "title": "Envoy and Other Proxies", - "path": "intro/vs/proxies" - }, - { - "title": "Custom Solutions", - "path": "intro/vs/custom" - } - ] } ] }, diff --git a/website/redirects.js b/website/redirects.js index c2f751df7..76d18f652 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -291,52 +291,6 @@ module.exports = [ permanent: true, }, { source: '/intro', destination: '/docs/intro', permanent: true }, - { source: '/intro/vs', destination: '/docs/intro/vs', permanent: true }, - { - source: '/intro/vs/chef-puppet', - destination: '/docs/intro/vs/chef-puppet', - permanent: true, - }, - { - source: '/intro/vs/nagios', - destination: '/docs/intro/vs/nagios', - permanent: true, - }, - { - source: '/intro/vs/skydns', - destination: '/docs/intro/vs/skydns', - permanent: true, - }, - { - source: '/intro/vs/smartstack', - destination: '/docs/intro/vs/smartstack', - permanent: true, - }, - { - source: '/intro/vs/serf', - destination: '/docs/intro/vs/serf', - permanent: true, - }, - { - source: '/intro/vs/eureka', - destination: '/docs/intro/vs/eureka', - permanent: true, - }, - { - source: '/intro/vs/istio', - destination: '/docs/intro/vs/istio', - permanent: true, - }, - { - source: '/intro/vs/proxies', - destination: '/docs/intro/vs/proxies', - permanent: true, - }, - { - source: '/intro/vs/custom', - destination: '/docs/intro/vs/custom', - permanent: true, - }, { source: '/docs/k8s/ambassador', destination: From 
13b3430a4e7554b14a92f3a1a44cb23b682383ef Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 29 Apr 2022 16:22:08 -0700 Subject: [PATCH 102/107] docs: show CLI cmd-specific opts before general opts Applied to a single command (acl auth-method create). --- .../content/commands/acl/auth-method/create.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/content/commands/acl/auth-method/create.mdx b/website/content/commands/acl/auth-method/create.mdx index dfdce5d13..affecbc4f 100644 --- a/website/content/commands/acl/auth-method/create.mdx +++ b/website/content/commands/acl/auth-method/create.mdx @@ -23,12 +23,6 @@ are not supported from commands, but may be from the corresponding HTTP endpoint Usage: `consul acl auth-method create [options] [args]` -#### API Options - -@include 'http_api_options_client.mdx' - -@include 'http_api_options_server.mdx' - #### Command Options - `-description=` - A description of the auth method. @@ -71,6 +65,8 @@ Usage: `consul acl auth-method create [options] [args]` #### Enterprise Options +@include 'http_api_partition_options.mdx' + @include 'http_api_namespace_options.mdx' - `-namespace-rule-bind-namespace=` - Namespace to bind on match. Can @@ -80,7 +76,11 @@ Usage: `consul acl auth-method create [options] [args]` verified identity attributes returned from the auth method during login to determine if the namespace rule applies. Added in Consul 1.8.0. -@include 'http_api_partition_options.mdx' +#### API Options + +@include 'http_api_options_client.mdx' + +@include 'http_api_options_server.mdx' ## Examples From f7858a1bdacfc7d844c3d63051ee8d2faec7508c Mon Sep 17 00:00:00 2001 From: cskh Date: Wed, 27 Jul 2022 10:16:46 -0400 Subject: [PATCH 103/107] chore: clarify the error message: service.service must not be empty (#13907) - when register service using catalog endpoint, the key of service name actually should be "service". Add this information to the error message will help user to quickly fix in the request. --- agent/consul/catalog_endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 6508ba220..5ab8fb12d 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -176,7 +176,7 @@ func servicePreApply(service *structs.NodeService, authz resolver.Result, authzC // Verify ServiceName provided if ID. if service.ID != "" && service.Service == "" { - return fmt.Errorf("Must provide service name with ID") + return fmt.Errorf("Must provide service name (Service.Service) when service ID is provided") } // Check the service address here and in the agent endpoint From 85dd506ecbd0cb2fa5dde0dd6d70439c50f71871 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Wed, 27 Jul 2022 10:21:27 -0400 Subject: [PATCH 104/107] Remove unnecessary goroutine in flaky test The watch is established in a background goroutine and the first assertion proves that the watcher is active so there is no reason for the update to happen in a racy goroutine. Note that this does not completely remove the race condition as the first call to testGetConfigValTimeout could time out before a config is returned. 
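For reference on the catalog validation touched in PATCH 103/107 above: a registration that sets a service ID but omits the service name (Service.Service) should now fail with the clearer message. Below is a minimal client-side sketch using the public github.com/hashicorp/consul/api package; the node name, address, and IDs are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A service ID is given but the service name (Service.Service) is not,
	// which is the case the clarified error message calls out.
	reg := &api.CatalogRegistration{
		Node:    "node-1",   // hypothetical node
		Address: "10.0.0.1", // hypothetical address
		Service: &api.AgentService{
			ID:   "web-1",
			Port: 8080,
			// Service: "web", // supplying the name makes the request valid
		},
	}

	if _, err := client.Catalog().Register(reg, nil); err != nil {
		fmt.Println("register failed:", err)
	}
}
```

Supplying the Service field (the service name) alongside the ID is the fix the new message points at.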
--- connect/proxy/config_test.go | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index 067a9048e..ddef16fb9 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -4,7 +4,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent" @@ -143,23 +142,17 @@ func TestAgentConfigWatcherSidecarProxy(t *testing.T) { }, }, } - require.Equal(t, expectCfg, cfg) // Now keep watching and update the config. - go func() { - // Wait for watcher to be watching - time.Sleep(20 * time.Millisecond) - reg.Connect.SidecarService.Proxy.Upstreams = append(reg.Connect.SidecarService.Proxy.Upstreams, - api.Upstream{ - DestinationName: "cache", - LocalBindPort: 9292, - LocalBindAddress: "127.10.10.10", - }) - reg.Connect.SidecarService.Proxy.Config["local_connect_timeout_ms"] = 444 - err := agent.ServiceRegister(reg) - require.NoError(t, err) - }() + reg.Connect.SidecarService.Proxy.Upstreams = append(reg.Connect.SidecarService.Proxy.Upstreams, + api.Upstream{ + DestinationName: "cache", + LocalBindPort: 9292, + LocalBindAddress: "127.10.10.10", + }) + reg.Connect.SidecarService.Proxy.Config["local_connect_timeout_ms"] = 444 + require.NoError(t, agent.ServiceRegister(reg)) cfg = testGetConfigValTimeout(t, w, 2*time.Second) @@ -173,7 +166,7 @@ func TestAgentConfigWatcherSidecarProxy(t *testing.T) { }) expectCfg.PublicListener.LocalConnectTimeoutMs = 444 - assert.Equal(t, expectCfg, cfg) + require.Equal(t, expectCfg, cfg) } func testGetConfigValTimeout(t *testing.T, w ConfigWatcher, From 146dd9377588242d9236b43348936ea76ef080a3 Mon Sep 17 00:00:00 2001 From: "Chris S. 
Kim" Date: Wed, 27 Jul 2022 11:06:20 -0400 Subject: [PATCH 105/107] Sort slice of ServiceNames deterministically --- agent/consul/state/peering.go | 4 +++- agent/proxycfg-glue/exported_peered_services_test.go | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go index a3529cbda..4bf19de69 100644 --- a/agent/consul/state/peering.go +++ b/agent/consul/state/peering.go @@ -353,7 +353,9 @@ func (s *Store) ExportedServicesForAllPeersByName(ws memdb.WatchSet, entMeta acl } m := list.ListAllDiscoveryChains() if len(m) > 0 { - out[peering.Name] = maps.SliceOfKeys(m) + sns := maps.SliceOfKeys[structs.ServiceName, structs.ExportedDiscoveryChainInfo](m) + sort.Sort(structs.ServiceList(sns)) + out[peering.Name] = sns } } diff --git a/agent/proxycfg-glue/exported_peered_services_test.go b/agent/proxycfg-glue/exported_peered_services_test.go index f0b41d9f3..552519bb1 100644 --- a/agent/proxycfg-glue/exported_peered_services_test.go +++ b/agent/proxycfg-glue/exported_peered_services_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil" ) func TestServerExportedPeeredServices(t *testing.T) { @@ -59,7 +60,7 @@ func TestServerExportedPeeredServices(t *testing.T) { }) require.NoError(t, dataSource.Notify(ctx, &structs.DCSpecificRequest{}, "", eventCh)) - t.Run("initial state", func(t *testing.T) { + testutil.RunStep(t, "initial state", func(t *testing.T) { result := getEventResult[*structs.IndexedExportedServiceList](t, eventCh) require.Equal(t, map[string]structs.ServiceList{ @@ -69,7 +70,7 @@ func TestServerExportedPeeredServices(t *testing.T) { ) }) - t.Run("update exported services", func(t *testing.T) { + testutil.RunStep(t, "update exported services", func(t *testing.T) { require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ExportedServicesConfigEntry{ Name: "default", Services: []structs.ExportedService{ From c80ab105276a7fa8cb880a5087d42fba3a277e55 Mon Sep 17 00:00:00 2001 From: "Chris S. 
Kim" Date: Wed, 27 Jul 2022 11:06:38 -0400 Subject: [PATCH 106/107] Retry checks for virtual IP metadata --- agent/consul/helper_test.go | 10 ++++++---- agent/consul/internal_endpoint_test.go | 4 ++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/agent/consul/helper_test.go b/agent/consul/helper_test.go index 48186b548..0f89856d7 100644 --- a/agent/consul/helper_test.go +++ b/agent/consul/helper_test.go @@ -1213,10 +1213,12 @@ func registerTestRoutingConfigTopologyEntries(t *testing.T, codec rpc.ClientCode func registerLocalAndRemoteServicesVIPEnabled(t *testing.T, state *state.Store) { t.Helper() - _, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled) - require.NoError(t, err) - require.NotNil(t, entry) - require.Equal(t, "true", entry.Value) + retry.Run(t, func(r *retry.R) { + _, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled) + require.NoError(r, err) + require.NotNil(r, entry) + require.Equal(r, "true", entry.Value) + }) // Register a local connect-native service require.NoError(t, state.EnsureRegistration(10, &structs.RegisterRequest{ diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index 6d35f55b6..91d48601c 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -2782,6 +2782,10 @@ func TestInternal_PeeredUpstreams(t *testing.T) { t.Skip("too slow for testing.Short") } + orig := virtualIPVersionCheckInterval + virtualIPVersionCheckInterval = 50 * time.Millisecond + t.Cleanup(func() { virtualIPVersionCheckInterval = orig }) + t.Parallel() _, s1 := testServerWithConfig(t) From 213e985d1704a58729d529a0e657cba6aac4d8f1 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Wed, 27 Jul 2022 12:08:12 -0400 Subject: [PATCH 107/107] Reduce arm64 flakes for TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary There were 16 combinations of tests but 4 of them were duplicates since the default key type and bits were "ec" and 256. That entry was commented out to reduce the subtest count to 12. testrpc.WaitForLeader was failing on arm64 environments; the cause is unknown but it might be due to the environment being flooded with parallel tests making RPC calls. The RPC polling+retry was replaced with a simpler check for leadership based on raft. 
--- agent/consul/leader_connect_test.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index 5e90de6b0..d9b386386 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -36,7 +36,7 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) { keyBits int }{ {connect.DefaultPrivateKeyType, connect.DefaultPrivateKeyBits}, - {"ec", 256}, + // {"ec", 256}, skip since values are same as Defaults {"ec", 384}, {"rsa", 2048}, {"rsa", 4096}, @@ -55,7 +55,7 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) { providerState := map[string]string{"foo": "dc1-value"} // Initialize primary as the primary DC - dir1, srv := testServerWithConfig(t, func(c *Config) { + _, srv := testServerWithConfig(t, func(c *Config) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.Build = "1.6.0" @@ -63,12 +63,9 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) { c.CAConfig.Config["PrivateKeyBits"] = src.keyBits c.CAConfig.Config["test_state"] = providerState }) - defer os.RemoveAll(dir1) - defer srv.Shutdown() codec := rpcClient(t, srv) - defer codec.Close() - testrpc.WaitForLeader(t, srv.RPC, "dc1") + waitForLeaderEstablishment(t, srv) testrpc.WaitForActiveCARoot(t, srv.RPC, "dc1", nil) var (