From 1bd5470b07d9666e1331c951bb98226a46820275 Mon Sep 17 00:00:00 2001 From: Sarah Pratt Date: Tue, 26 Jul 2022 13:31:06 -0500 Subject: [PATCH 001/104] Separate port and socket path requirement in case of local agent assignment --- agent/agent_endpoint.go | 8 ++++---- agent/sidecar_service_test.go | 2 +- agent/structs/structs.go | 25 +++++++++++++++++++++---- agent/structs/structs_test.go | 10 ++++++++++ 4 files changed, 36 insertions(+), 9 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 11ec5f9ca..65d8af1df 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1123,9 +1123,9 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http. return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid Service Meta: %v", err)} } - // Run validation. This is the same validation that would happen on - // the catalog endpoint so it helps ensure the sync will work properly. - if err := ns.Validate(); err != nil { + // Run validation. This same validation would happen on the catalog endpoint, + // so it helps ensure the sync will work properly. + if err := ns.ValidateForAgent(); err != nil { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Validation failed: %v", err.Error())} } @@ -1164,7 +1164,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http. return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid SidecarService: %s", err)} } if sidecar != nil { - if err := sidecar.Validate(); err != nil { + if err := sidecar.ValidateForAgent(); err != nil { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Failed Validation: %v", err.Error())} } // Make sure we are allowed to register the sidecar using the token diff --git a/agent/sidecar_service_test.go b/agent/sidecar_service_test.go index a2ffe9af4..7ced9720a 100644 --- a/agent/sidecar_service_test.go +++ b/agent/sidecar_service_test.go @@ -339,7 +339,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { } ns := tt.sd.NodeService() - err := ns.Validate() + err := ns.ValidateForAgent() require.NoError(t, err, "Invalid test case - NodeService must validate") gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 275bf4c18..7a7bd93f5 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -1411,6 +1411,27 @@ func (s *NodeService) IsGateway() bool { func (s *NodeService) Validate() error { var result error + if s.Kind == ServiceKindConnectProxy { + if s.Port == 0 && s.SocketPath == "" { + result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind)) + } + } + + commonValidation := s.ValidateForAgent() + if commonValidation != nil { + result = multierror.Append(result, commonValidation) + } + + return result +} + +// ValidateForAgent does a subset validation, with the assumption that a local agent can assist with missing values. +// +// I.e. in the catalog case, a local agent cannot be assumed to facilitate auto-assignment of port or socket path, +// so additional checks are needed. 
+func (s *NodeService) ValidateForAgent() error { + var result error + // TODO(partitions): remember to double check that this doesn't cross partition boundaries // ConnectProxy validation @@ -1426,10 +1447,6 @@ func (s *NodeService) Validate() error { "services")) } - if s.Port == 0 && s.SocketPath == "" { - result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind)) - } - if s.Connect.Native { result = multierror.Append(result, fmt.Errorf( "A Proxy cannot also be Connect Native, only typical services")) diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 0b6efb330..844189c2f 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -1157,6 +1157,16 @@ func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) { } } +func TestStructs_NodeService_ValidateConnectProxyWithAgentAutoAssign(t *testing.T) { + t.Run("connect-proxy: no port set", func(t *testing.T) { + ns := TestNodeServiceProxy(t) + ns.Port = 0 + + err := ns.ValidateForAgent() + assert.True(t, err == nil) + }) +} + func TestStructs_NodeService_ValidateConnectProxy_In_Partition(t *testing.T) { cases := []struct { Name string From f01a4f91dc456e1d20f5046929ae925ffe8bd90e Mon Sep 17 00:00:00 2001 From: Sarah Pratt Date: Wed, 27 Jul 2022 13:19:17 -0500 Subject: [PATCH 002/104] refactor sidecare_service method into parts --- agent/agent_endpoint.go | 2 +- agent/sidecar_service.go | 81 +++++++++------- agent/sidecar_service_test.go | 176 +++++++++++++++++----------------- 3 files changed, 137 insertions(+), 122 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 65d8af1df..b2d68e304 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1125,7 +1125,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http. // Run validation. This same validation would happen on the catalog endpoint, // so it helps ensure the sync will work properly. - if err := ns.ValidateForAgent(); err != nil { + if err := ns.Validate(); err != nil { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Validation failed: %v", err.Error())} } diff --git a/agent/sidecar_service.go b/agent/sidecar_service.go index 673a02252..ea58a7a67 100644 --- a/agent/sidecar_service.go +++ b/agent/sidecar_service.go @@ -114,9 +114,32 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str } } + port, err := a.sidecarPortFromServiceIDLocked(sidecar.Port, sidecar.CompoundServiceID()) + if err != nil { + return nil, nil, "", err + } + sidecar.Port = port + + // Setup checks + checks, err := ns.Connect.SidecarService.CheckTypes() + if err != nil { + return nil, nil, "", err + } + // Setup default check if none given + if len(checks) < 1 { + checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port) + } + + return sidecar, checks, token, nil +} + +// sidecarPortFromServiceID is used to allocate a unique port for a sidecar proxy. +// This is called immediately before registration to avoid value collisions. This function assumes the state lock is already held. +func (a *Agent) sidecarPortFromServiceIDLocked(sidecarPort int, sidecarCompoundServiceID structs.ServiceID) (int, error) { + // Allocate port if needed (min and max inclusive). 
rangeLen := a.config.ConnectSidecarMaxPort - a.config.ConnectSidecarMinPort + 1 - if sidecar.Port < 1 && a.config.ConnectSidecarMinPort > 0 && rangeLen > 0 { + if sidecarPort < 1 && a.config.ConnectSidecarMinPort > 0 && rangeLen > 0 { // This did pick at random which was simpler but consul reload would assign // new ports to all the sidecars since it unloads all state and // re-populates. It also made this more difficult to test (have to pin the @@ -130,11 +153,11 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str // Check if other port is in auto-assign range if otherNS.Port >= a.config.ConnectSidecarMinPort && otherNS.Port <= a.config.ConnectSidecarMaxPort { - if otherNS.CompoundServiceID() == sidecar.CompoundServiceID() { + if otherNS.CompoundServiceID() == sidecarCompoundServiceID { // This sidecar is already registered with an auto-port and is just // being updated so pick the same port as before rather than allocate // a new one. - sidecar.Port = otherNS.Port + sidecarPort = otherNS.Port break } usedPorts[otherNS.Port] = struct{}{} @@ -147,54 +170,48 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str // Check we still need to assign a port and didn't find we already had one // allocated. - if sidecar.Port < 1 { + if sidecarPort < 1 { // Iterate until we find lowest unused port for p := a.config.ConnectSidecarMinPort; p <= a.config.ConnectSidecarMaxPort; p++ { _, used := usedPorts[p] if !used { - sidecar.Port = p + sidecarPort = p break } } } } // If no ports left (or auto ports disabled) fail - if sidecar.Port < 1 { + if sidecarPort < 1 { // If ports are set to zero explicitly, config builder switches them to // `-1`. In this case don't show the actual values since we don't know what // was actually in config (zero or negative) and it might be confusing, we // just know they explicitly disabled auto assignment. if a.config.ConnectSidecarMinPort < 1 || a.config.ConnectSidecarMaxPort < 1 { - return nil, nil, "", fmt.Errorf("no port provided for sidecar_service " + + return 0, fmt.Errorf("no port provided for sidecar_service " + "and auto-assignment disabled in config") } - return nil, nil, "", fmt.Errorf("no port provided for sidecar_service and none "+ + return 0, fmt.Errorf("no port provided for sidecar_service and none "+ "left in the configured range [%d, %d]", a.config.ConnectSidecarMinPort, a.config.ConnectSidecarMaxPort) } - // Setup checks - checks, err := ns.Connect.SidecarService.CheckTypes() - if err != nil { - return nil, nil, "", err - } - - // Setup default check if none given - if len(checks) < 1 { - checks = []*structs.CheckType{ - { - Name: "Connect Sidecar Listening", - // Default to localhost rather than agent/service public IP. The checks - // can always be overridden if a non-loopback IP is needed. - TCP: ipaddr.FormatAddressPort(sidecar.Proxy.LocalServiceAddress, sidecar.Port), - Interval: 10 * time.Second, - }, - { - Name: "Connect Sidecar Aliasing " + ns.ID, - AliasService: ns.ID, - }, - } - } - - return sidecar, checks, token, nil + return sidecarPort, nil +} + +func sidecarDefaultChecks(serviceID string, localServiceAddress string, port int) []*structs.CheckType { + // Setup default check if none given + return []*structs.CheckType{ + { + Name: "Connect Sidecar Listening", + // Default to localhost rather than agent/service public IP. The checks + // can always be overridden if a non-loopback IP is needed. 
+ TCP: ipaddr.FormatAddressPort(localServiceAddress, port), + Interval: 10 * time.Second, + }, + { + Name: "Connect Sidecar Aliasing " + serviceID, + AliasService: serviceID, + }, + } } diff --git a/agent/sidecar_service_test.go b/agent/sidecar_service_test.go index 7ced9720a..cffe054c2 100644 --- a/agent/sidecar_service_test.go +++ b/agent/sidecar_service_test.go @@ -2,6 +2,7 @@ package agent import ( "fmt" + "github.com/hashicorp/consul/acl" "testing" "time" @@ -16,16 +17,13 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { } tests := []struct { - name string - maxPort int - preRegister *structs.ServiceDefinition - sd *structs.ServiceDefinition - token string - autoPortsDisabled bool - wantNS *structs.NodeService - wantChecks []*structs.CheckType - wantToken string - wantErr string + name string + sd *structs.ServiceDefinition + token string + wantNS *structs.NodeService + wantChecks []*structs.CheckType + wantToken string + wantErr string }{ { name: "no sidecar", @@ -141,42 +139,6 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { }, wantToken: "custom-token", }, - { - name: "no auto ports available", - // register another sidecar consuming our 1 and only allocated auto port. - preRegister: &structs.ServiceDefinition{ - Kind: structs.ServiceKindConnectProxy, - Name: "api-proxy-sidecar", - Port: 2222, // Consume the one available auto-port - Proxy: &structs.ConnectProxyConfig{ - DestinationServiceName: "api", - }, - }, - sd: &structs.ServiceDefinition{ - ID: "web1", - Name: "web", - Port: 1111, - Connect: &structs.ServiceConnect{ - SidecarService: &structs.ServiceDefinition{}, - }, - }, - token: "foo", - wantErr: "none left in the configured range [2222, 2222]", - }, - { - name: "auto ports disabled", - autoPortsDisabled: true, - sd: &structs.ServiceDefinition{ - ID: "web1", - Name: "web", - Port: 1111, - Connect: &structs.ServiceConnect{ - SidecarService: &structs.ServiceDefinition{}, - }, - }, - token: "foo", - wantErr: "auto-assignment disabled in config", - }, { name: "inherit tags and meta", sd: &structs.ServiceDefinition{ @@ -252,6 +214,64 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { token: "foo", wantErr: "reserved for internal use", }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hcl := ` + ports { + sidecar_min_port = 2222 + sidecar_max_port = 2222 + } + ` + a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl}) + defer a.Shutdown() + + ns := tt.sd.NodeService() + err := ns.Validate() + require.NoError(t, err, "Invalid test case - NodeService must validate") + + gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token) + if tt.wantErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.wantErr) + return + } + + require.NoError(t, err) + require.Equal(t, tt.wantNS, gotNS) + require.Equal(t, tt.wantChecks, gotChecks) + require.Equal(t, tt.wantToken, gotToken) + }) + } +} + +func TestAgent_SidecarPortFromServiceIDLocked(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + tests := []struct { + name string + autoPortsDisabled bool + enterpriseMeta acl.EnterpriseMeta + maxPort int + port int + preRegister *structs.ServiceDefinition + serviceID string + wantPort int + wantErr string + }{ + { + name: "port pre-specified", + serviceID: "web1", + wantPort: 2222, + }, + { + name: "use auto ports", + serviceID: "web1", + port: 1111, + wantPort: 1111, + }, { name: "re-registering same sidecar with no port should pick same one", // Allow 
multiple ports to be sure we get the right one @@ -269,42 +289,27 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { LocalServicePort: 1111, }, }, - // Register same again but with different service port - sd: &structs.ServiceDefinition{ - ID: "web1", - Name: "web", - Port: 1112, - Connect: &structs.ServiceConnect{ - SidecarService: &structs.ServiceDefinition{}, + // Register same again + serviceID: "web1-sidecar-proxy", + wantPort: 2222, // Should claim the same port as before + }, + { + name: "all auto ports already taken", + // register another sidecar consuming our 1 and only allocated auto port. + preRegister: &structs.ServiceDefinition{ + Kind: structs.ServiceKindConnectProxy, + Name: "api-proxy-sidecar", + Port: 2222, // Consume the one available auto-port + Proxy: &structs.ConnectProxyConfig{ + DestinationServiceName: "api", }, }, - token: "foo", - wantNS: &structs.NodeService{ - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - Kind: structs.ServiceKindConnectProxy, - ID: "web1-sidecar-proxy", - Service: "web-sidecar-proxy", - Port: 2222, // Should claim the same port as before - LocallyRegisteredAsSidecar: true, - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "web", - DestinationServiceID: "web1", - LocalServiceAddress: "127.0.0.1", - LocalServicePort: 1112, - }, - }, - wantChecks: []*structs.CheckType{ - { - Name: "Connect Sidecar Listening", - TCP: "127.0.0.1:2222", - Interval: 10 * time.Second, - }, - { - Name: "Connect Sidecar Aliasing web1", - AliasService: "web1", - }, - }, - wantToken: "foo", + wantErr: "none left in the configured range [2222, 2222]", + }, + { + name: "auto ports disabled", + autoPortsDisabled: true, + wantErr: "auto-assignment disabled in config", }, } for _, tt := range tests { @@ -329,7 +334,6 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { } ` } - a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl}) defer a.Shutdown() @@ -338,11 +342,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { require.NoError(t, err) } - ns := tt.sd.NodeService() - err := ns.ValidateForAgent() - require.NoError(t, err, "Invalid test case - NodeService must validate") - - gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token) + gotPort, err := a.sidecarPortFromServiceIDLocked(tt.port, structs.ServiceID{ID: tt.serviceID, EnterpriseMeta: tt.enterpriseMeta}) if tt.wantErr != "" { require.Error(t, err) require.Contains(t, err.Error(), tt.wantErr) @@ -350,9 +350,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { } require.NoError(t, err) - require.Equal(t, tt.wantNS, gotNS) - require.Equal(t, tt.wantChecks, gotChecks) - require.Equal(t, tt.wantToken, gotToken) + require.Equal(t, tt.wantPort, gotPort) }) } } From f29758fff8e15e9a775283a7eb00111f432afda4 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 5 Aug 2022 10:45:24 -0700 Subject: [PATCH 003/104] Allow uppercase in proxy launch -sidecar-for arg Previously, when launching a sidecar proxy with one of the following commands: - consul connect envoy -sidecar-for=... - consul connect proxy -sidecar-for=... ... the -sidecar-for argument could only contain lowercase letters, even if the service was registered with some uppercase letters. Now, the -sidecar-for argument is treated as case-insensitive. 
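The change in this patch replaces a one-sided `strings.ToLower` comparison with `strings.EqualFold` in `command/connect/proxy/proxy.go`. The following self-contained sketch (illustrative only, not part of the diff) shows why the old comparison forced the `-sidecar-for` value to be all lowercase while the new one matches regardless of case; the `One-SideCar` value mirrors the test case added in `proxy_test.go`.

```go
// Illustrative sketch, not part of the patch: why the one-sided ToLower
// comparison rejected mixed-case -sidecar-for values, and why EqualFold
// (the replacement used in proxy.go) accepts them.
package main

import (
	"fmt"
	"strings"
)

func main() {
	registeredID := "One-SideCar" // DestinationServiceID as registered in the catalog
	sidecarFor := "One-SideCar"   // value passed to -sidecar-for

	// Old comparison: only the registered ID was lowercased, so the flag value
	// had to be typed entirely in lowercase to match.
	fmt.Println(strings.ToLower(registeredID) == sidecarFor) // false

	// New comparison: case-insensitive on both sides.
	fmt.Println(strings.EqualFold(registeredID, sidecarFor)) // true
}
```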
--- .changelog/14034.txt | 3 +++ command/connect/proxy/proxy.go | 2 +- command/connect/proxy/proxy_test.go | 11 +++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 .changelog/14034.txt diff --git a/.changelog/14034.txt b/.changelog/14034.txt new file mode 100644 index 000000000..216c5406a --- /dev/null +++ b/.changelog/14034.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive. +``` diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index d2d0b90cf..a0477a6a1 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -232,7 +232,7 @@ func LookupProxyIDForSidecar(client *api.Client, sidecarFor string) (string, err var proxyIDs []string for _, svc := range svcs { if svc.Kind == api.ServiceKindConnectProxy && svc.Proxy != nil && - strings.ToLower(svc.Proxy.DestinationServiceID) == sidecarFor { + strings.EqualFold(svc.Proxy.DestinationServiceID, sidecarFor) { proxyIDs = append(proxyIDs, svc.ID) } } diff --git a/command/connect/proxy/proxy_test.go b/command/connect/proxy/proxy_test.go index ae7b1cdfb..28d5a9da2 100644 --- a/command/connect/proxy/proxy_test.go +++ b/command/connect/proxy/proxy_test.go @@ -110,6 +110,17 @@ func TestCommandConfigWatcher(t *testing.T) { require.Equal(t, 9999, cfg.PublicListener.BindPort) }, }, + + { + Name: "-sidecar-for, one sidecar case-insensitive", + Flags: []string{ + "-sidecar-for", "One-SideCar", + }, + Test: func(t *testing.T, cfg *proxy.Config) { + // Sanity check we got the right instance. + require.Equal(t, 9999, cfg.PublicListener.BindPort) + }, + }, } for _, tc := range cases { From d35dd36cbf8d720567d7771e4765b742ad007466 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Tue, 9 Aug 2022 12:54:49 +0100 Subject: [PATCH 004/104] cli: update agent log preamble to reflect per-listener TLS config --- command/agent/agent.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/command/agent/agent.go b/command/agent/agent.go index a69e63071..cc08213e1 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -201,24 +201,29 @@ func (c *cmd) run(args []string) int { if config.ServerMode { segment = "" } - ui.Info(fmt.Sprintf(" Version: '%s'", c.versionHuman)) + ui.Info(fmt.Sprintf(" Version: '%s'", c.versionHuman)) if strings.Contains(c.versionHuman, "dev") { - ui.Info(fmt.Sprintf(" Revision: '%s'", c.revision)) + ui.Info(fmt.Sprintf(" Revision: '%s'", c.revision)) } - ui.Info(fmt.Sprintf(" Build Date: '%s'", c.buildDate)) - ui.Info(fmt.Sprintf(" Node ID: '%s'", config.NodeID)) - ui.Info(fmt.Sprintf(" Node name: '%s'", config.NodeName)) + ui.Info(fmt.Sprintf(" Build Date: '%s'", c.buildDate)) + ui.Info(fmt.Sprintf(" Node ID: '%s'", config.NodeID)) + ui.Info(fmt.Sprintf(" Node name: '%s'", config.NodeName)) if ap := config.PartitionOrEmpty(); ap != "" { - ui.Info(fmt.Sprintf(" Partition: '%s'", ap)) + ui.Info(fmt.Sprintf(" Partition: '%s'", ap)) } - ui.Info(fmt.Sprintf(" Datacenter: '%s' (Segment: '%s')", config.Datacenter, segment)) - ui.Info(fmt.Sprintf(" Server: %v (Bootstrap: %v)", config.ServerMode, config.Bootstrap)) - ui.Info(fmt.Sprintf(" Client Addr: %v (HTTP: %d, HTTPS: %d, gRPC: %d, DNS: %d)", config.ClientAddrs, + ui.Info(fmt.Sprintf(" Datacenter: '%s' (Segment: '%s')", config.Datacenter, segment)) + ui.Info(fmt.Sprintf(" Server: %v (Bootstrap: %v)", 
config.ServerMode, config.Bootstrap)) + ui.Info(fmt.Sprintf(" Client Addr: %v (HTTP: %d, HTTPS: %d, gRPC: %d, DNS: %d)", config.ClientAddrs, config.HTTPPort, config.HTTPSPort, config.GRPCPort, config.DNSPort)) - ui.Info(fmt.Sprintf(" Cluster Addr: %v (LAN: %d, WAN: %d)", config.AdvertiseAddrLAN, + ui.Info(fmt.Sprintf(" Cluster Addr: %v (LAN: %d, WAN: %d)", config.AdvertiseAddrLAN, config.SerfPortLAN, config.SerfPortWAN)) - ui.Info(fmt.Sprintf(" Encrypt: Gossip: %v, TLS-Outgoing: %v, TLS-Incoming: %v, Auto-Encrypt-TLS: %t", - config.EncryptKey != "", config.TLS.InternalRPC.VerifyOutgoing, config.TLS.InternalRPC.VerifyIncoming, config.AutoEncryptTLS || config.AutoEncryptAllowTLS)) + ui.Info(fmt.Sprintf("Gossip Encryption: %t", config.EncryptKey != "")) + ui.Info(fmt.Sprintf(" Auto-Encrypt-TLS: %t", config.AutoEncryptTLS || config.AutoEncryptAllowTLS)) + ui.Info(fmt.Sprintf(" HTTPS TLS: Verify Incoming: %t, Verify Outgoing: %t, Min Version: %s", + config.TLS.HTTPS.VerifyIncoming, config.TLS.HTTPS.VerifyOutgoing, config.TLS.HTTPS.TLSMinVersion)) + ui.Info(fmt.Sprintf(" gRPC TLS: Verify Incoming: %t, Min Version: %s", config.TLS.GRPC.VerifyIncoming, config.TLS.GRPC.TLSMinVersion)) + ui.Info(fmt.Sprintf(" Internal RPC TLS: Verify Incoming: %t, Verify Outgoing: %t (Verify Hostname: %t), Min Version: %s", + config.TLS.InternalRPC.VerifyIncoming, config.TLS.InternalRPC.VerifyOutgoing, config.TLS.InternalRPC.VerifyServerHostname, config.TLS.InternalRPC.TLSMinVersion)) // Enable log streaming ui.Output("") ui.Output("Log data will now stream in as it occurs:\n") From 174b9e23f4348550b48048e4cde29cc5d03657e5 Mon Sep 17 00:00:00 2001 From: boruszak Date: Wed, 10 Aug 2022 09:48:18 -0500 Subject: [PATCH 005/104] Not available on HCP Consul update --- .../docs/connect/cluster-peering/create-manage-peering.mdx | 2 +- website/content/docs/connect/cluster-peering/index.mdx | 2 +- website/content/docs/connect/cluster-peering/k8s.mdx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/website/content/docs/connect/cluster-peering/create-manage-peering.mdx b/website/content/docs/connect/cluster-peering/create-manage-peering.mdx index 566286883..009c60f40 100644 --- a/website/content/docs/connect/cluster-peering/create-manage-peering.mdx +++ b/website/content/docs/connect/cluster-peering/create-manage-peering.mdx @@ -7,7 +7,7 @@ description: >- # Create and Manage Peering Connections -~> **Cluster peering is currently in beta:** Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support. +~> **Cluster peering is currently in beta:** Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. A peering token enables cluster peering between different datacenters. Once you generate a peering token, you can use it to establish a connection between clusters. Then you can export services and create intentions so that peered clusters can call those services. diff --git a/website/content/docs/connect/cluster-peering/index.mdx b/website/content/docs/connect/cluster-peering/index.mdx index 317fe320a..8c4be1ce4 100644 --- a/website/content/docs/connect/cluster-peering/index.mdx +++ b/website/content/docs/connect/cluster-peering/index.mdx @@ -7,7 +7,7 @@ description: >- # What is Cluster Peering? -~> **Cluster peering is currently in beta**: Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support. +~> **Cluster peering is currently in beta**: Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. You can create peering connections between two or more independent clusters so that services deployed to different partitions or datacenters can communicate. diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index 69b34e2e5..77ac6d38a 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -9,7 +9,7 @@ description: >- ~> **Cluster peering is currently in beta:** Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in -beta may have performance issues, scaling issues, and limited support. +beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. To establish a cluster peering connection on Kubernetes, you need to enable the feature in the Helm chart and create custom resource definitions (CRDs) for each side of the peering. From 647f9787f8b4fba7d4ff58dbc27a1d8517bc53fa Mon Sep 17 00:00:00 2001 From: cskh Date: Wed, 10 Aug 2022 10:53:57 -0400 Subject: [PATCH 006/104] fix: shadowed err in retryJoin() (#14112) - err value will be used later to surface the error message if r.join() returns any err. --- agent/retry_join.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/agent/retry_join.go b/agent/retry_join.go index 8cfb00e22..b807697e8 100644 --- a/agent/retry_join.go +++ b/agent/retry_join.go @@ -226,7 +226,8 @@ func (r *retryJoiner) retryJoin() error { for { addrs := retryJoinAddrs(disco, r.variant, r.cluster, r.addrs, r.logger) if len(addrs) > 0 { - n, err := r.join(addrs) + n := 0 + n, err = r.join(addrs) if err == nil { if r.variant == retryJoinMeshGatewayVariant { r.logger.Info("Refreshing mesh gateways completed") From 69f995574cf334a28d2e15d1d998a82ba2c7fdce Mon Sep 17 00:00:00 2001 From: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Date: Wed, 10 Aug 2022 10:21:20 -0500 Subject: [PATCH 007/104] Update website/content/docs/connect/cluster-peering/index.mdx Co-authored-by: Tu Nguyen --- website/content/docs/connect/cluster-peering/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/cluster-peering/index.mdx b/website/content/docs/connect/cluster-peering/index.mdx index 8c4be1ce4..c4b8c7a4e 100644 --- a/website/content/docs/connect/cluster-peering/index.mdx +++ b/website/content/docs/connect/cluster-peering/index.mdx @@ -7,7 +7,7 @@ description: >- # What is Cluster Peering? -~> **Cluster peering is currently in beta**: Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. +~> **Cluster peering is currently in beta**: Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. You can create peering connections between two or more independent clusters so that services deployed to different partitions or datacenters can communicate. From f44f1881edcfa1a5e7841ee1e2d8f875d798cdce Mon Sep 17 00:00:00 2001 From: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Date: Wed, 10 Aug 2022 10:21:26 -0500 Subject: [PATCH 008/104] Update website/content/docs/connect/cluster-peering/k8s.mdx Co-authored-by: Tu Nguyen --- website/content/docs/connect/cluster-peering/k8s.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index 77ac6d38a..8b178ad9c 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -9,7 +9,7 @@ description: >- ~> **Cluster peering is currently in beta:** Functionality associated with cluster peering is subject to change. You should never use the beta release in secure environments or production scenarios. Features in -beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. +beta may have performance issues, scaling issues, and limited support.

Cluster peering is not currently available in the HCP Consul offering. To establish a cluster peering connection on Kubernetes, you need to enable the feature in the Helm chart and create custom resource definitions (CRDs) for each side of the peering. From d42a04243609872fa436ee8c47b9e1a6db75041e Mon Sep 17 00:00:00 2001 From: Ashwin Venkatesh Date: Wed, 10 Aug 2022 13:14:36 -0400 Subject: [PATCH 009/104] Add docs to recreate peering token. --- website/content/docs/connect/cluster-peering/k8s.mdx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index 8b178ad9c..a3716d3d7 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -237,3 +237,13 @@ To confirm that you deleted your peering connection, in `cluster-01`, query the ```shell-session $ curl "localhost:8500/v1/health/connect/backend?peer=cluster-02" ``` + +## Recreate/Reset a peering connection + +To recreate or reset the peering connection, a new peering token needs to be generated on the cluster where the `PeeringAcceptor` was created, which in this case is `cluster-01`. + +This can be performed by creating/updating the annotation `consul.hashicorp.com/peering-version` on the `PeeringAcceptor`. If the annotation already exists, update its value to a version that is higher. + +Once the above is done, repeat the steps in the peering process from saving your peering token so that you can export it to the other cluster. This will re-establish peering with the updated token. + +-> **NOTE:** A new peering token is only generated upon manually setting and updating the value of the annotation `consul.hashicorp.com/peering-version`. Creating a new token will cause the previous token to expire. 
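The curl example in this patch can also be expressed as a small Go program. The sketch below is standard library only; the agent address and peer name are taken from the documentation example. It issues the same health query, which is useful both to confirm that a deleted peering no longer returns instances and to check that a re-established peering exposes the expected service again.

```go
// A minimal sketch, standard library only, of the health query used earlier in
// this patch ("/v1/health/connect/backend?peer=cluster-02"). The agent address
// and peer name are the ones from the documentation example.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	url := "http://localhost:8500/v1/health/connect/backend?peer=cluster-02"

	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("querying local Consul agent: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response: %v", err)
	}

	// Inspect the returned service entries (a JSON array) to see what the
	// "cluster-02" peer currently exposes for the "backend" service.
	fmt.Printf("%s\n%s\n", resp.Status, body)
}
```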
From 576680ddf205a1fd07dd36193a29ffb5b53f0f9c Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Wed, 10 Aug 2022 20:02:43 +0200 Subject: [PATCH 010/104] Use actual intention for permission check intentions edit (#14113) --- ui/packages/consul-ui/app/templates/dc/intentions/edit.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/packages/consul-ui/app/templates/dc/intentions/edit.hbs b/ui/packages/consul-ui/app/templates/dc/intentions/edit.hbs index 2139fd731..2affe93b2 100644 --- a/ui/packages/consul-ui/app/templates/dc/intentions/edit.hbs +++ b/ui/packages/consul-ui/app/templates/dc/intentions/edit.hbs @@ -22,7 +22,7 @@ as |route|> {{#let loader.data - (not (can "write intention" item=item)) + (not (can "write intention" item=loader.data)) as |item readOnly|}} From 4e740bf82471ba0db9c8378f858e1f99ddf515d0 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Wed, 10 Aug 2022 20:04:30 +0200 Subject: [PATCH 011/104] Don't surface partitions in service search sources (#14078) --- .../consul/service/search-bar/index.hbs | 22 +------------------ 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs index f74926e25..a73d0b6ca 100644 --- a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs @@ -139,27 +139,8 @@ as |key value|}} - {{#let components.Optgroup components.Option as |Optgroup Option|}} -{{#let - (reject-by 'Partition' @partition @partitions) -as |nonDefaultPartitions|}} -{{#if (gt nonDefaultPartitions.length 0)}} - - {{#each @partitions as |partition|}} - - {{/each}} - -{{/if}} -{{/let}} - + {{#let components.Option as |Option|}} {{#if (gt @sources.length 0)}} - {{#each @sources as |source|}} - {{/if}} {{/let}} From 97eec3f2b9cc5d248b17c2029b3c9641d4e05675 Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Wed, 10 Aug 2022 20:07:59 +0200 Subject: [PATCH 012/104] ui: Improve display peer info in service list (#14111) * Include nspace when surfacing peer in bucket-list Whenever we display a peer and we are not on OSS we will surface the namespace as well. The rest of the ui logic of the bucket list has not changed. 
* Display bucket-list after instance-count service-list --- .../app/components/consul/bucket/list/index.js | 14 ++++++-------- .../app/components/consul/service/list/index.hbs | 10 +++++----- .../components/consul/bucket/list-test.js | 14 +++++++------- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/index.js b/ui/packages/consul-ui/app/components/consul/bucket/list/index.js index c874ac91b..c1aa8afe9 100644 --- a/ui/packages/consul-ui/app/components/consul/bucket/list/index.js +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/index.js @@ -58,7 +58,7 @@ export default class ConsulBucketList extends Component { get namespacePart() { const { item, nspace } = this.args; - const { abilities, partitionPart } = this; + const { abilities, partitionPart, peerPart } = this; const nspaceItem = { type: 'nspace', @@ -71,15 +71,13 @@ export default class ConsulBucketList extends Component { return [nspaceItem]; } + if (peerPart.length && abilities.can('use nspaces')) { + return [nspaceItem]; + } + if (nspace && abilities.can('use nspaces')) { if (item.Namespace !== nspace) { - return [ - { - type: 'nspace', - label: 'Namespace', - item: item.Namespace, - }, - ]; + return [nspaceItem]; } } diff --git a/ui/packages/consul-ui/app/components/consul/service/list/index.hbs b/ui/packages/consul-ui/app/components/consul/service/list/index.hbs index 551cf027c..a97184b98 100644 --- a/ui/packages/consul-ui/app/components/consul/service/list/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service/list/index.hbs @@ -56,6 +56,11 @@ {{format-number item.InstanceCount}} {{pluralize item.InstanceCount 'instance' without-count=true}} {{/if}} + {{#if (eq item.Kind 'terminating-gateway')}} {{format-number item.GatewayConfig.AssociatedServiceCount}} {{pluralize item.GatewayConfig.AssociatedServiceCount 'linked service' without-count=true}} @@ -87,11 +92,6 @@ {{/if}} {{/if}} - \ No newline at end of file diff --git a/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js b/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js index 15d6b81ce..063f2b086 100644 --- a/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js +++ b/ui/packages/consul-ui/tests/integration/components/consul/bucket/list-test.js @@ -224,31 +224,31 @@ module('Integration | Component | consul bucket list', function(hooks) { assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not displayed'); }); - test('it displays a peer and no nspace and no service when item.namespace and nspace match', async function(assert) { + test('it displays a peer and nspace when item.namespace and nspace match', async function(assert) { const PEER_NAME = 'Tomster'; const NAMESPACE_NAME = 'Mascot'; - const SERVICE_NAME = 'Ember.js'; this.set('peerName', PEER_NAME); this.set('namespace', NAMESPACE_NAME); - this.set('service', SERVICE_NAME); await render(hbs` `); assert.dom('[data-test-bucket-item="peer"]').hasText(PEER_NAME, 'Peer is displayed'); - assert.dom('[data-test-bucket-item="nspace"]').doesNotExist('namespace is not displayed'); - assert.dom('[data-test-bucket-item="service"]').doesNotExist('service is not displayed'); + assert + .dom('[data-test-bucket-item="nspace"]') + .hasText( + NAMESPACE_NAME, + 'namespace is displayed when peer is displayed and we are not on OSS (i.e. 
cannot use nspaces)' + ); assert.dom('[data-test-bucket-item="partition"]').doesNotExist('partition is not displayed'); }); }); From cd28cfc8926de13f4dc2f696a1dfbf05a21291e7 Mon Sep 17 00:00:00 2001 From: Ashwin Venkatesh Date: Wed, 10 Aug 2022 14:25:12 -0400 Subject: [PATCH 013/104] Update website/content/docs/connect/cluster-peering/k8s.mdx Co-authored-by: Tu Nguyen --- .../docs/connect/cluster-peering/k8s.mdx | 29 +++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index a3716d3d7..b23bf74b1 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -238,12 +238,31 @@ To confirm that you deleted your peering connection, in `cluster-01`, query the $ curl "localhost:8500/v1/health/connect/backend?peer=cluster-02" ``` -## Recreate/Reset a peering connection +## Recreate or reset a peering connection -To recreate or reset the peering connection, a new peering token needs to be generated on the cluster where the `PeeringAcceptor` was created, which in this case is `cluster-01`. +To recreate or reset the peering connection, you need to generate a new peering token on the cluster where you created the `PeeringAcceptor` (in this example, `cluster-01`). -This can be performed by creating/updating the annotation `consul.hashicorp.com/peering-version` on the `PeeringAcceptor`. If the annotation already exists, update its value to a version that is higher. +1. You can do this by creating or updating the annotation `consul.hashicorp.com/peering-version` on the `PeeringAcceptor`. If the annotation already exists, update its value to a version that is higher. -Once the above is done, repeat the steps in the peering process from saving your peering token so that you can export it to the other cluster. This will re-establish peering with the updated token. + --> **NOTE:** A new peering token is only generated upon manually setting and updating the value of the annotation `consul.hashicorp.com/peering-version`. Creating a new token will cause the previous token to expire. + ```yaml + apiVersion: consul.hashicorp.com/v1alpha1 + kind: PeeringAcceptor + metadata: + name: cluster-02 + annotations: + consul.hashicorp.com/peering-version: 1 ## The peering version you want to set. + spec: + peer: + secret: + name: "peering-token" + key: "data" + backend: "kubernetes" + ``` + + + +1. Once you have done this, repeat the steps in the peering process. This includes saving your peering token so that you can export it to the other cluster. This will re-establish peering with the updated token. + +~> **Note:** A new peering token is only generated upon manually setting and updating the value of the annotation `consul.hashicorp.com/peering-version`. Creating a new token will cause the previous token to expire. 
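The regeneration rule described above (only a higher `consul.hashicorp.com/peering-version` value produces a new token) can be summarized in a few lines of code. The sketch below is purely illustrative, is not the consul-k8s controller implementation, and assumes the annotation value is an integer-like string.

```go
// Illustrative only; this is not the consul-k8s controller implementation.
// It sketches the documented rule: a new peering token is generated only when
// the consul.hashicorp.com/peering-version annotation is set to a value higher
// than the one previously observed.
package main

import (
	"fmt"
	"strconv"
)

// shouldRegenerateToken reports whether a changed peering-version annotation
// calls for issuing a fresh peering token.
func shouldRegenerateToken(lastObserved, annotated string) (bool, error) {
	last, err := strconv.Atoi(lastObserved)
	if err != nil {
		return false, fmt.Errorf("invalid last observed version %q: %w", lastObserved, err)
	}
	next, err := strconv.Atoi(annotated)
	if err != nil {
		return false, fmt.Errorf("invalid annotation value %q: %w", annotated, err)
	}
	return next > last, nil
}

func main() {
	// Bumping the annotation from 1 to 2 triggers a new token; leaving it at 1
	// (or lowering it) does not.
	for _, v := range []string{"1", "2"} {
		regen, err := shouldRegenerateToken("1", v)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("annotation %s -> regenerate token: %v\n", v, regen)
	}
}
```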
From d12cbcc3c146622c853ff51b292c8400c7c682a3 Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Wed, 10 Aug 2022 11:57:09 -0700 Subject: [PATCH 014/104] docs: Update supported Envoy versions (#14130) --- website/content/docs/connect/proxies/envoy.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index ee0b1b165..526d642bc 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -36,9 +36,9 @@ Consul supports **four major Envoy releases** at the beginning of each major Con | Consul Version | Compatible Envoy Versions | | ------------------- | -----------------------------------------------------------------------------------| +| 1.13.x | 1.23.0, 1.22.2, 1.21.4, 1.20.6 | | 1.12.x | 1.22.2, 1.21.3, 1.20.4, 1.19.5 | | 1.11.x | 1.20.2, 1.19.3, 1.18.6, 1.17.41 | -| 1.10.x | 1.18.6, 1.17.41, 1.16.51 , 1.15.51 | 1. Envoy 1.20.1 and earlier are vulnerable to [CVE-2022-21654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21654) and [CVE-2022-21655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21655). Both CVEs were patched in Envoy versions 1.18.6, 1.19.3, and 1.20.2. Envoy 1.16.x and older releases are no longer supported (see [HCSEC-2022-07](https://discuss.hashicorp.com/t/hcsec-2022-07-consul-s-connect-service-mesh-affected-by-recent-envoy-security-releases/36332)). Consul 1.9.x clusters should be upgraded to 1.10.x and Envoy upgraded to the latest supported Envoy version for that release, 1.18.6. From 5c125cea240e851c4778eeece0d5b52d537c02d4 Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Wed, 10 Aug 2022 12:21:21 -0700 Subject: [PATCH 015/104] Sync changes from 1.13.0 release (#14104) --- CHANGELOG.md | 115 +++++++++++++++++++++++---------------------- version/version.go | 2 +- 2 files changed, 61 insertions(+), 56 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d77193026..0c4769834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,63 @@ +## 1.13.0 (August 9, 2022) + +BREAKING CHANGES: + +* config-entry: Exporting a specific service name across all namespace is invalid. +* connect: Removes support for Envoy 1.19 [[GH-13807](https://github.com/hashicorp/consul/issues/13807)] +* telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Before upgrading you should remove this flag from your config if the flag is being used. [[GH-13532](https://github.com/hashicorp/consul/issues/13532)] + +FEATURES: + +* **Cluster Peering (Beta)** This version adds a new model to federate Consul clusters for both service mesh and traditional service discovery. Cluster peering allows for service interconnectivity with looser coupling than the existing WAN federation. For more information refer to the [cluster peering](https://www.consul.io/docs/connect/cluster-peering) documentation. +* **Transparent proxying through terminating gateways** This version adds egress traffic control to destinations outside of Consul's catalog, such as APIs on the public internet. Transparent proxies can dial [destinations defined in service-defaults](https://www.consul.io/docs/connect/config-entries/service-defaults#destination) and have the traffic routed through terminating gateways. For more information refer to the [terminating gateway](https://www.consul.io/docs/connect/gateways/terminating-gateway#terminating-gateway-configuration) documentation. 
+* acl: It is now possible to login and logout using the gRPC API [[GH-12935](https://github.com/hashicorp/consul/issues/12935)] +* agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands +to report this. Agent also reports build date in log on startup. [[GH-13357](https://github.com/hashicorp/consul/issues/13357)] +* ca: Leaf certificates can now be obtained via the gRPC API: `Sign` [[GH-12787](https://github.com/hashicorp/consul/issues/12787)] +* checks: add UDP health checks.. [[GH-12722](https://github.com/hashicorp/consul/issues/12722)] +* cli: A new flag for config delete to delete a config entry in a +valid config file, e.g., config delete -filename intention-allow.hcl [[GH-13677](https://github.com/hashicorp/consul/issues/13677)] +* connect: Adds a new `destination` field to the `service-default` config entry that allows routing egress traffic +through a terminating gateway in transparent proxy mode without modifying the catalog. [[GH-13613](https://github.com/hashicorp/consul/issues/13613)] +* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-12825](https://github.com/hashicorp/consul/issues/12825)] +* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-1717](https://github.com/hashicorp/consul/issues/1717)] +* grpc: New gRPC service and endpoint to return the list of supported consul dataplane features [[GH-12695](https://github.com/hashicorp/consul/issues/12695)] +* server: broadcast the public grpc port using lan serf and update the consul service in the catalog with the same data [[GH-13687](https://github.com/hashicorp/consul/issues/13687)] +* streaming: Added topic that can be used to consume updates about the list of services in a datacenter [[GH-13722](https://github.com/hashicorp/consul/issues/13722)] +* streaming: Added topics for `ingress-gateway`, `mesh`, `service-intentions` and `service-resolver` config entry events. [[GH-13658](https://github.com/hashicorp/consul/issues/13658)] + +IMPROVEMENTS: + +* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13450](https://github.com/hashicorp/consul/issues/13450)] +* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-2046](https://github.com/hashicorp/consul/issues/2046)] +* api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13001](https://github.com/hashicorp/consul/issues/13001)] +* api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway [[GH-12914](https://github.com/hashicorp/consul/issues/12914)] +* catalog: Add per-node indexes to reduce watchset firing for unrelated nodes and services. [[GH-12399](https://github.com/hashicorp/consul/issues/12399)] +* connect: add validation to ensure connect native services have a port or socketpath specified on catalog registration. +This was the only missing piece to ensure all mesh services are validated for a port (or socketpath) specification on catalog registration. 
[[GH-12881](https://github.com/hashicorp/consul/issues/12881)] +* ui: Add new CopyableCode component and use it in certain pre-existing areas [[GH-13686](https://github.com/hashicorp/consul/issues/13686)] +* acl: Clarify node/service identities must be lowercase [[GH-12807](https://github.com/hashicorp/consul/issues/12807)] +* command: Add support for enabling TLS in the Envoy Prometheus endpoint via the `consul connect envoy` command. +Adds the `-prometheus-ca-file`, `-prometheus-ca-path`, `-prometheus-cert-file` and `-prometheus-key-file` flags. [[GH-13481](https://github.com/hashicorp/consul/issues/13481)] +* connect: Add Envoy 1.23.0 to support matrix [[GH-13807](https://github.com/hashicorp/consul/issues/13807)] +* connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance. [[GH-13143](https://github.com/hashicorp/consul/issues/13143)] +* grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. [[GH-12819](https://github.com/hashicorp/consul/issues/12819)] +* telemetry: Added `consul.raft.thread.main.saturation` and `consul.raft.thread.fsm.saturation` metrics to measure approximate saturation of the Raft goroutines [[GH-12865](https://github.com/hashicorp/consul/issues/12865)] +* ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities [[GH-10996](https://github.com/hashicorp/consul/issues/10996)] +* ui: upgrade ember-composable-helpers to v5.x [[GH-13394](https://github.com/hashicorp/consul/issues/13394)] + +BUG FIXES: + +* acl: Fixed a bug where the ACL down policy wasn't being applied on remote errors from the primary datacenter. [[GH-12885](https://github.com/hashicorp/consul/issues/12885)] +* cli: when `acl token read` is used with the `-self` and `-expanded` flags, return an error instead of panicking [[GH-13787](https://github.com/hashicorp/consul/issues/13787)] +* connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. [[GH-13847](https://github.com/hashicorp/consul/issues/13847)] +* connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams. +connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams. [[GH-13958](https://github.com/hashicorp/consul/issues/13958)] +* proxycfg: Fixed a minor bug that would cause configuring a terminating gateway to watch too many service resolvers and waste resources doing filtering. [[GH-13012](https://github.com/hashicorp/consul/issues/13012)] +* raft: upgrade to v1.3.8 which fixes a bug where non cluster member can still be able to participate in an election. 
[[GH-12844](https://github.com/hashicorp/consul/issues/12844)] +* serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed [[GH-13062](https://github.com/hashicorp/consul/issues/13062)] +* ui: Fixes an issue where client side validation errors were not showing in certain areas [[GH-14021](https://github.com/hashicorp/consul/issues/14021)] + ## 1.12.3 (July 13, 2022) IMPROVEMENTS: @@ -36,61 +96,6 @@ BUG FIXES: * agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13264](https://github.com/hashicorp/consul/issues/13264)] * fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)] -## 1.13.0-alpha2 (June 21, 2022) - -IMPROVEMENTS: - -* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13450](https://github.com/hashicorp/consul/issues/13450)] -* connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5) [[GH-13431](https://github.com/hashicorp/consul/issues/13431)] - -BUG FIXES: - -* ui: Fix incorrect text on certain page empty states [[GH-13409](https://github.com/hashicorp/consul/issues/13409)] - -## 1.13.0-alpha1 (June 15, 2022) - -BREAKING CHANGES: - -* config-entry: Exporting a specific service name across all namespace is invalid. - -FEATURES: - -* acl: It is now possible to login and logout using the gRPC API [[GH-12935](https://github.com/hashicorp/consul/issues/12935)] -* agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands -to report this. Agent also reports build date in log on startup. [[GH-13357](https://github.com/hashicorp/consul/issues/13357)] -* ca: Leaf certificates can now be obtained via the gRPC API: `Sign` [[GH-12787](https://github.com/hashicorp/consul/issues/12787)] -* checks: add UDP health checks.. [[GH-12722](https://github.com/hashicorp/consul/issues/12722)] -* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-12825](https://github.com/hashicorp/consul/issues/12825)] -* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-1717](https://github.com/hashicorp/consul/issues/1717)] -* grpc: New gRPC service and endpoint to return the list of supported consul dataplane features [[GH-12695](https://github.com/hashicorp/consul/issues/12695)] - -IMPROVEMENTS: - -* api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13001](https://github.com/hashicorp/consul/issues/13001)] -* api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway [[GH-12914](https://github.com/hashicorp/consul/issues/12914)] -* connect: add validation to ensure connect native services have a port or socketpath specified on catalog registration. -This was the only missing piece to ensure all mesh services are validated for a port (or socketpath) specification on catalog registration. [[GH-12881](https://github.com/hashicorp/consul/issues/12881)] -* Support Vault namespaces in Connect CA by adding RootPKINamespace and -IntermediatePKINamespace fields to the config. 
[[GH-12904](https://github.com/hashicorp/consul/issues/12904)] -* acl: Clarify node/service identities must be lowercase [[GH-12807](https://github.com/hashicorp/consul/issues/12807)] -* connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance. [[GH-13143](https://github.com/hashicorp/consul/issues/13143)] -* dns: Added support for specifying admin partition in node lookups. [[GH-13421](https://github.com/hashicorp/consul/issues/13421)] -* grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. [[GH-12819](https://github.com/hashicorp/consul/issues/12819)] -* telemetry: Added `consul.raft.thread.main.saturation` and `consul.raft.thread.fsm.saturation` metrics to measure approximate saturation of the Raft goroutines [[GH-12865](https://github.com/hashicorp/consul/issues/12865)] -* telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not. [[GH-13304](https://github.com/hashicorp/consul/issues/13304)] -* ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities [[GH-10996](https://github.com/hashicorp/consul/issues/10996)] -* ui: upgrade ember-composable-helpers to v5.x [[GH-13394](https://github.com/hashicorp/consul/issues/13394)] - -BUG FIXES: - -* acl: Fixed a bug where the ACL down policy wasn't being applied on remote errors from the primary datacenter. [[GH-12885](https://github.com/hashicorp/consul/issues/12885)] -* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13256](https://github.com/hashicorp/consul/issues/13256)] -* deps: Update go-grpc/grpc, resolving connection memory leak [[GH-13051](https://github.com/hashicorp/consul/issues/13051)] -* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)] -* proxycfg: Fixed a minor bug that would cause configuring a terminating gateway to watch too many service resolvers and waste resources doing filtering. [[GH-13012](https://github.com/hashicorp/consul/issues/13012)] -* raft: upgrade to v1.3.8 which fixes a bug where non cluster member can still be able to participate in an election. [[GH-12844](https://github.com/hashicorp/consul/issues/12844)] -* serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed [[GH-13062](https://github.com/hashicorp/consul/issues/13062)] - ## 1.12.2 (June 3, 2022) BUG FIXES: diff --git a/version/version.go b/version/version.go index edcdca85b..8930a432a 100644 --- a/version/version.go +++ b/version/version.go @@ -14,7 +14,7 @@ var ( // // Version must conform to the format expected by github.com/hashicorp/go-version // for tests to work. - Version = "1.13.0" + Version = "1.14.0" // https://semver.org/#spec-item-10 VersionMetadata = "" From 8ca0a872ede6fc46a6048a4ee3d49e4d26fb7a03 Mon Sep 17 00:00:00 2001 From: "A.J. 
Sanon" <47250909+sanon-dev@users.noreply.github.com> Date: Wed, 10 Aug 2022 16:17:56 -0400 Subject: [PATCH 016/104] Add Consul ECS v0.5 release notes (#14010) --- .../docs/release-notes/consul-ecs/v0_4_x.mdx | 6 ++-- .../docs/release-notes/consul-ecs/v0_5_x.mdx | 30 +++++++++++++++++++ website/data/docs-nav-data.json | 4 +++ 3 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 website/content/docs/release-notes/consul-ecs/v0_5_x.mdx diff --git a/website/content/docs/release-notes/consul-ecs/v0_4_x.mdx b/website/content/docs/release-notes/consul-ecs/v0_4_x.mdx index 18e6028a9..5ada94722 100644 --- a/website/content/docs/release-notes/consul-ecs/v0_4_x.mdx +++ b/website/content/docs/release-notes/consul-ecs/v0_4_x.mdx @@ -5,7 +5,7 @@ description: >- Consul ECS release notes for version 0.4.x --- -# Consul ECS 0.4.0 +# Consul ECS 0.4.x ## Release Highlights @@ -23,6 +23,6 @@ The changelogs for this major release version and any maintenance versions are l -> **Note**: These links will take you to the changelogs on the GitHub website. -- [0.4.0](https://github.com/hashicorp/consul-ecs/releases/tag/v0.4.0) - - [0.4.1](https://github.com/hashicorp/consul-ecs/releases/tag/v0.4.1) + +- [0.4.0](https://github.com/hashicorp/consul-ecs/releases/tag/v0.4.0) diff --git a/website/content/docs/release-notes/consul-ecs/v0_5_x.mdx b/website/content/docs/release-notes/consul-ecs/v0_5_x.mdx new file mode 100644 index 000000000..54b29b3b3 --- /dev/null +++ b/website/content/docs/release-notes/consul-ecs/v0_5_x.mdx @@ -0,0 +1,30 @@ +--- +layout: docs +page_title: 0.5.x +description: >- + Consul ECS release notes for version 0.5.x +--- + +# Consul ECS 0.5.x + +## Release Highlights + +- **Audit Logging (Enterprise) :** Consul on ECS now captures authentication events and processes them with the HTTP API. Audit logging provides insight into access and usage patterns. Refer to [Audit Logging](/docs/ecs/enterprise#audit-logging) for usage information. + +- **AWS IAM Auth Method :** This feature provides support for Consul's AWS IAM auth method. This allows AWS IAM roles and users to authenticate with Consul to obtain ACL tokens. Refer to [ECS Configuration Reference](/docs/ecs/configuration-reference#consullogin) for configuration information. + +- **Mesh Gateways :** This feature introduces support for running mesh gateways as ECS tasks. Mesh gateways enable service mesh communication across datacenter and admin partition boundaries. Refer to [ECS Installation with Terraform](/docs/ecs/terraform/install#configure-the-gateway-task-module) for usage information. + +## Supported Software Versions + +- Consul: 1.12.x + +## Changelogs + +The changelogs for this major release version and any maintenance versions are listed below. + +-> **Note**: These links will take you to the changelogs on the GitHub website. 
+ +- [0.5.1](https://github.com/hashicorp/consul-ecs/releases/tag/v0.5.1) + +- [0.5.0](https://github.com/hashicorp/consul-ecs/releases/tag/v0.5.0) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 71ee064f0..b7c211741 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -1278,6 +1278,10 @@ { "title": "Consul ECS", "routes": [ + { + "title": "v0.5.x", + "path": "release-notes/consul-ecs/v0_5_x" + }, { "title": "v0.4.x", "path": "release-notes/consul-ecs/v0_4_x" From 3c8ab23f7617218d04a48c493e4fb17ab4dafd18 Mon Sep 17 00:00:00 2001 From: Ashwin Venkatesh Date: Wed, 10 Aug 2022 16:53:45 -0400 Subject: [PATCH 017/104] Update website/content/docs/connect/cluster-peering/k8s.mdx Co-authored-by: Tu Nguyen --- website/content/docs/connect/cluster-peering/k8s.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index b23bf74b1..26d86cf31 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -244,7 +244,7 @@ To recreate or reset the peering connection, you need to generate a new peering 1. You can do this by creating or updating the annotation `consul.hashicorp.com/peering-version` on the `PeeringAcceptor`. If the annotation already exists, update its value to a version that is higher. - + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 From 9cb82e231a2601ad57a2d777209ff4757d9a004c Mon Sep 17 00:00:00 2001 From: boruszak Date: Wed, 10 Aug 2022 16:54:42 -0500 Subject: [PATCH 018/104] Blank commit --- .../mesh-gateway/service-to-service-traffic-partitions.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx index dfd4780c6..f3542c4d6 100644 --- a/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx +++ b/website/content/docs/connect/gateways/mesh-gateway/service-to-service-traffic-partitions.mdx @@ -3,7 +3,7 @@ layout: docs page_title: Service-to-service Traffic Across Partitions description: >- This topic describes how to configure mesh gateways to route a service's data to upstreams - in other partitions. It describes how to use Envoy and how you can integrate with your preferred gateway. + in other partitions. It describes how to use Envoy and how you can integrate with your preferred gateway. --- # Service-to-service Traffic Across Partitions From 534096a6aca2d882cd0e8c68cd44de5682519dbd Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Wed, 10 Aug 2022 11:53:25 -0400 Subject: [PATCH 019/104] Handle wrapped errors in isFailedPreconditionErr --- agent/consul/leader_peering.go | 9 +++++++++ agent/consul/leader_peering_test.go | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index dd6185a19..bc5b669cd 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -606,6 +606,15 @@ func isFailedPreconditionErr(err error) bool { if err == nil { return false } + + // Handle wrapped errors, since status.FromError does a naive assertion. 
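+	// status.FromError only type-asserts on the top-level error value, so a
+	// status error wrapped with fmt.Errorf("...: %w", err) would otherwise not
+	// be recognized. errors.As unwraps the chain before checking the code.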
+ var statusErr interface { + GRPCStatus() *grpcstatus.Status + } + if errors.As(err, &statusErr) { + return statusErr.GRPCStatus().Code() == codes.FailedPrecondition + } + grpcErr, ok := grpcstatus.FromError(err) if !ok { return false diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 48e48e14b..46a74b6ad 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -12,6 +12,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -1332,3 +1333,13 @@ func TestLeader_Peering_retryLoopBackoffPeering_cancelContext(t *testing.T) { fmt.Errorf("error 1"), }, allErrors) } + +func Test_isFailedPreconditionErr(t *testing.T) { + st := grpcstatus.New(codes.FailedPrecondition, "cannot establish a peering stream on a follower node") + err := st.Err() + assert.True(t, isFailedPreconditionErr(err)) + + // test that wrapped errors are checked correctly + werr := fmt.Errorf("wrapped: %w", err) + assert.True(t, isFailedPreconditionErr(werr)) +} From fbbb54fdc2b1232daff497ad494052c9f91bbe60 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Wed, 10 Aug 2022 15:42:54 -0400 Subject: [PATCH 020/104] Register peerStreamServer internally to enable RPC forwarding --- agent/consul/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/agent/consul/server.go b/agent/consul/server.go index 10b9d48f0..1afa74c91 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -816,6 +816,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler // Note: these external gRPC services are also exposed on the internal server to // enable RPC forwarding. + s.peerStreamServer.Register(srv) s.externalACLServer.Register(srv) s.externalConnectCAServer.Register(srv) } From 55945a8231c1e67cdc5621c778a44024cb14cfdc Mon Sep 17 00:00:00 2001 From: "Chris S. 
Kim" Date: Wed, 10 Aug 2022 18:31:55 -0400 Subject: [PATCH 021/104] Add test to verify forwarding --- agent/consul/peering_backend_test.go | 60 ++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/agent/consul/peering_backend_test.go b/agent/consul/peering_backend_test.go index fc73ba53d..7636dc48b 100644 --- a/agent/consul/peering_backend_test.go +++ b/agent/consul/peering_backend_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbpeerstream" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" ) @@ -76,3 +77,62 @@ func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn, return conn, nil } } + +func TestPeerStreamService_ForwardToLeader(t *testing.T) { + t.Parallel() + + _, conf1 := testServerConfig(t) + server1, err := newServer(t, conf1) + require.NoError(t, err) + + _, conf2 := testServerConfig(t) + conf2.Bootstrap = false + server2, err := newServer(t, conf2) + require.NoError(t, err) + + // server1 is leader, server2 follower + testrpc.WaitForLeader(t, server1.RPC, "dc1") + joinLAN(t, server2, server1) + testrpc.WaitForLeader(t, server2.RPC, "dc1") + + peerId := testUUID() + + // Simulate a GenerateToken call on server1, which stores the establishment secret + { + require.NoError(t, server1.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "foo", + ID: peerId, + }, + SecretsRequest: &pbpeering.SecretsWriteRequest{ + PeerID: peerId, + Request: &pbpeering.SecretsWriteRequest_GenerateToken{ + GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{ + EstablishmentSecret: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84", + }, + }, + }, + })) + } + + testutil.RunStep(t, "server2 forwards write to server1", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + // We will dial server2 which should forward to server1 + conn, err := gogrpc.DialContext(ctx, server2.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(server2.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peerStreamClient := pbpeerstream.NewPeerStreamServiceClient(conn) + req := &pbpeerstream.ExchangeSecretRequest{ + PeerID: peerId, + EstablishmentSecret: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84", + } + _, err = peerStreamClient.ExchangeSecret(ctx, req) + require.NoError(t, err) + }) +} From 2b8e8280f55c13d6e3e6e9cb35646de78e6b9865 Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Thu, 11 Aug 2022 10:26:21 -0700 Subject: [PATCH 022/104] ci: Disable Arm RPM verifications (#14142) --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5d50b708c..45891047d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -410,8 +410,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - arch: ["i386", "x86_64", "armv7hl", "aarch64"] - # fail-fast: true + # TODO(eculver): re-enable when there is a smaller verification container available + arch: ["i386", "x86_64"] #, "armv7hl", "aarch64"] env: version: ${{ needs.get-product-version.outputs.product-version }} From 47a36c91373fc723436c66a8a3320c7afaa0026b Mon Sep 17 00:00:00 2001 From: Mike Morris Date: Thu, 11 Aug 2022 13:32:34 -0400 Subject: [PATCH 023/104] 
docs(capigw): add v0.4.0 upgrade instructions (#14101) docs(capigw): add manual ReferencePolicy -> ReferenceGrant migration steps, comment out kube-storage-version-migrator workflow in case we choose to publish it later --- website/content/docs/api-gateway/upgrades.mdx | 159 +++++++++++++++++- 1 file changed, 158 insertions(+), 1 deletion(-) diff --git a/website/content/docs/api-gateway/upgrades.mdx b/website/content/docs/api-gateway/upgrades.mdx index 59381baa7..24c29fde1 100644 --- a/website/content/docs/api-gateway/upgrades.mdx +++ b/website/content/docs/api-gateway/upgrades.mdx @@ -9,6 +9,163 @@ description: >- This topic describes how to upgrade Consul API Gateway. +## Upgrade to v0.4.0 + +Consul API Gateway v0.4.0 adds support for [Gateway API v0.5.0](https://github.com/kubernetes-sigs/gateway-api/releases/tag/v0.5.0) and the following resources: + +- The graduated v1beta1 `GatewayClass`, `Gateway` and `HTTPRoute` resources. + +- The [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant) resource, which replaces the identical [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) resource. + +Consul API Gateway v0.4.0 is backward-compatible with existing `ReferencePolicy` resources, but we will remove support for `ReferencePolicy` resources in a future release. We recommend that you migrate to `ReferenceGrant` after upgrading. + +### Requirements + +Ensure that the following requirements are met prior to upgrading: + +- Consul API Gateway should be running version v0.3.0. + +### Procedure + +1. Complete the [standard upgrade](#standard-upgrade). + +1. After completing the upgrade, complete the [post-upgrade configuration changes](#v0.4.0-post-upgrade-configuration-changes). The post-upgrade procedure describes how to replace your `ReferencePolicy` resources with `ReferenceGrant` resources and how to upgrade your `GatewayClass`, `Gateway`, and `HTTPRoute` resources from v1alpha2 to v1beta1. + + + +### Post-upgrade configuration changes +Complete the following steps after performing standard upgrade procedure. +#### Requirements + +- Consul API Gateway should be running version v0.4.0. +- Consul Helm chart should be v0.47.0 or later. +- You should have the ability to run `kubectl` CLI commands. +- `kubectl` should be configured to point to the cluster containing the installation you are upgrading. +- You should have the following permissions for your Kubernetes cluster: + - `Gateway.read` + - `ReferenceGrant.create` (Added in Consul Helm chart v0.47.0) + - `ReferencePolicy.delete` + +#### Procedure + +1. Verify the current version of the `consul-api-gateway-controller` `Deployment`: + + ```shell-session + $ kubectl get deployment --namespace consul consul-api-gateway-controller --output=jsonpath="{@.spec.template.spec.containers[?(@.name=='api-gateway-controller')].image}" + ``` + + You should receive a response similar to the following: + + ```log + "hashicorp/consul-api-gateway:0.4.0" + ``` + + + +1. Issue the following command to get all `ReferencePolicy` resources across all namespaces. + + ```shell-session + $ kubectl get referencepolicy --all-namespaces + ``` +If you have any active `ReferencePolicy` resources, you will receive output similar to the response below. + + ```log + Warning: ReferencePolicy has been renamed to ReferenceGrant. ReferencePolicy will be removed in v0.6.0 in favor of the identical ReferenceGrant resource. 
+ NAMESPACE NAME + default example-reference-policy + ``` + + If your output is empty, upgrade your `GatewayClass`, `Gateway` and `HTTPRoute` resources to v1beta1 as described in [step 7](#v1beta1-gatewayclass-gateway-httproute). + +1. For each `ReferencePolicy` in the source YAML files, change the `kind` field to `ReferenceGrant`. You can optionally update the `metadata.name` field or filename if they include the term "policy". In the following example, the `kind` and `metadata.name` fields and filename have been changed to reflect the new resource. Note that updating the `kind` field prevents you from using the `kubectl edit` command to edit the remote state directly. + + + + ```yaml + apiVersion: gateway.networking.k8s.io/v1alpha2 + kind: ReferenceGrant + metadata: + name: reference-grant + namespace: web-namespace + spec: + from: + - group: gateway.networking.k8s.io + kind: HTTPRoute + namespace: example-namesapce + to: + - group: "" + kind: Service + name: web-backend + ``` + + + +1. For each file, apply the updated YAML to your cluster to create a new `ReferenceGrant` resource. + + ```shell-session + $ kubectl apply --filename + ``` + +1. Check to confirm that each new `ReferenceGrant` was created successfully. + + ```shell-session + $ kubectl get referencegrant --namespace + NAME + example-reference-grant + ``` + +1. Finally, delete each corresponding old `ReferencePolicy` resource. Because replacement `ReferenceGrant` resources have already been created, there should be no interruption in the availability of any referenced `Service` or `Secret`. + + ```shell-session + $ kubectl delete referencepolicy --namespace + Warning: ReferencePolicy has been renamed to ReferenceGrant. ReferencePolicy will be removed in v0.6.0 in favor of the identical ReferenceGrant resource. + referencepolicy.gateway.networking.k8s.io "example-reference-policy" deleted + ``` + + + +1. For each `GatewayClass`, `Gateway`, and `HTTPRoute` in the source YAML, update the `apiVersion` field to `gateway.networking.k8s.io/v1beta1`. Note that updating the `apiVersion` field prevents you from using the `kubectl edit` command to edit the remote state directly. + + + + ```yaml + apiVersion: gateway.networking.k8s.io/v1beta1 + kind: Gateway + metadata: + name: example-gateway + namespace: gateway-namespace + spec: + ... + ``` + + + +1. For each file, apply the updated YAML to your cluster to update the existing `GatewayClass`, `Gateway` or `HTTPRoute` resources. + + ```shell-session + $ kubectl apply --filename + gateway.gateway.networking.k8s.io/example-gateway configured + ``` + + ## Upgrade to v0.3.0 from v0.2.0 or lower @@ -32,7 +189,7 @@ Ensure that the following requirements are met prior to upgrading: 1. 
Verify the current version of the `consul-api-gateway-controller` `Deployment`: ```shell-session - $ kubectl get deployment --namespace consul consul-api-gateway-controller --output=jsonpath= "{@.spec.template.spec.containers[?(@.name=='api-gateway-controller')].image}" + $ kubectl get deployment --namespace consul consul-api-gateway-controller --output=jsonpath="{@.spec.template.spec.containers[?(@.name=='api-gateway-controller')].image}" ``` You should receive a response similar to the following: From 3189433a30ae896838221740bb8549b7eb197b59 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Thu, 11 Aug 2022 11:17:17 -0700 Subject: [PATCH 024/104] Add upgrade instructions and considerations for Consul 1.13.1 --- website/content/docs/upgrading/instructions/index.mdx | 3 +++ website/content/docs/upgrading/upgrade-specific.mdx | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/website/content/docs/upgrading/instructions/index.mdx b/website/content/docs/upgrading/instructions/index.mdx index 9bae599d9..9b93fb2dd 100644 --- a/website/content/docs/upgrading/instructions/index.mdx +++ b/website/content/docs/upgrading/instructions/index.mdx @@ -28,6 +28,9 @@ The upgrade guides will mention notable changes and link to relevant changelogs we recommend reviewing the changelog for versions between the one you are on and the one you are upgrading to at each step to familiarize yourself with changes. +~> **Note:** If you are upgrading from `1.11`+ and have connect proxies +registered, upgrade directly to `1.13.1` instead of `1.13.0`. + Select your _currently installed_ release series: - [1.9.x](/docs/upgrading/instructions/upgrade-to-1-10-x) - [1.8.x](/docs/upgrading/instructions/upgrade-to-1-10-x) diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index bf8c73125..13bd4973c 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -14,7 +14,10 @@ provided for their upgrades as a result of new features or changed behavior. This page is used to document those details separately from the standard upgrade flow. -## Consul 1.13.0 +## Consul 1.13.x + +~> **Note:** If you are upgrading from `1.11`+ and have connect proxies +registered, upgrade directly to `1.13.1` instead of `1.13.0`. ### gRPC TLS From 182399255be92cac3c8af08dc6c849f5fa0caf20 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Thu, 11 Aug 2022 14:47:10 -0400 Subject: [PATCH 025/104] Handle breaking change for ServiceVirtualIP restore (#14149) Consul 1.13.0 changed ServiceVirtualIP to use PeeredServiceName instead of ServiceName which was a breaking change for those using service mesh and wanted to restore their snapshot after upgrading to 1.13.0. This commit handles existing data with older ServiceName and converts it during restore so that there are no issues when restoring from older snapshots. 
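The fix decodes the stored Service key loosely and tries the new shape before falling back to the old one. The snippet below is a minimal, standalone sketch of that fallback-decoding pattern; the stand-in types `NewServiceKey` and `OldServiceKey` and the helper `decodeServiceKey` are illustrative assumptions only, while the actual change in `restoreServiceVirtualIP` (in the diff that follows) operates on `structs.PeeredServiceName` and `structs.ServiceName` via `github.com/mitchellh/mapstructure`.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// OldServiceKey mirrors the pre-1.13.0 key shape: a flat service name.
type OldServiceKey struct {
	Name string
}

// NewServiceKey mirrors the post-1.13.0 key shape: the service name nested
// under a wrapper type.
type NewServiceKey struct {
	ServiceName OldServiceKey
}

// decodeServiceKey decodes a loosely typed snapshot entry, preferring the new
// shape and falling back to wrapping the old one.
func decodeServiceKey(raw map[string]interface{}) (NewServiceKey, error) {
	var key NewServiceKey
	if err := mapstructure.Decode(raw, &key); err != nil {
		return NewServiceKey{}, fmt.Errorf("cannot decode as new key: %w", err)
	}
	if key.ServiceName.Name != "" {
		return key, nil // snapshot was written by 1.13.0 or newer
	}

	// Expected field was empty, so this must be the older flat shape.
	var legacy OldServiceKey
	if err := mapstructure.Decode(raw, &legacy); err != nil {
		return NewServiceKey{}, fmt.Errorf("cannot decode as old key: %w", err)
	}
	return NewServiceKey{ServiceName: legacy}, nil
}

func main() {
	// An entry written by a pre-1.13.0 server only carries the flat name.
	legacyEntry := map[string]interface{}{"Name": "foo"}

	key, err := decodeServiceKey(legacyEntry)
	if err != nil {
		panic(err)
	}
	fmt.Println(key.ServiceName.Name) // Output: foo
}
```

Decoding into a generic map first keeps the restore path agnostic to which Consul version wrote the snapshot, which is the same design choice the patch takes.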
--- .changelog/14149.txt | 3 ++ agent/consul/fsm/snapshot_oss.go | 40 ++++++++++++++++- agent/consul/fsm/snapshot_oss_test.go | 64 +++++++++++++++++++++++++++ agent/structs/structs.go | 4 +- 4 files changed, 107 insertions(+), 4 deletions(-) create mode 100644 .changelog/14149.txt diff --git a/.changelog/14149.txt b/.changelog/14149.txt new file mode 100644 index 000000000..726861f5a --- /dev/null +++ b/.changelog/14149.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fixed a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)] +``` \ No newline at end of file diff --git a/agent/consul/fsm/snapshot_oss.go b/agent/consul/fsm/snapshot_oss.go index 167ffd100..7fa53381a 100644 --- a/agent/consul/fsm/snapshot_oss.go +++ b/agent/consul/fsm/snapshot_oss.go @@ -1,8 +1,12 @@ package fsm import ( + "fmt" + "net" + "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/raft" + "github.com/mitchellh/mapstructure" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" @@ -886,11 +890,43 @@ func restoreSystemMetadata(header *SnapshotHeader, restore *state.Restore, decod } func restoreServiceVirtualIP(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { - var req state.ServiceVirtualIP + // state.ServiceVirtualIP was changed in a breaking way in 1.13.0 (2e4cb6f77d2be36b02e9be0b289b24e5b0afb794). + // We attempt to reconcile the older type by decoding to a map then decoding that map into + // structs.PeeredServiceName first, and then structs.ServiceName. + var req struct { + Service map[string]interface{} + IP net.IP + + structs.RaftIndex + } if err := decoder.Decode(&req); err != nil { return err } - if err := restore.ServiceVirtualIP(req); err != nil { + + vip := state.ServiceVirtualIP{ + IP: req.IP, + RaftIndex: req.RaftIndex, + } + + // PeeredServiceName is the expected primary key type. + var psn structs.PeeredServiceName + if err := mapstructure.Decode(req.Service, &psn); err != nil { + return fmt.Errorf("cannot decode to structs.PeeredServiceName: %w", err) + } + vip.Service = psn + + // If the expected primary key field is empty, it must be the older ServiceName type. + if vip.Service.ServiceName.Name == "" { + var sn structs.ServiceName + if err := mapstructure.Decode(req.Service, &sn); err != nil { + return fmt.Errorf("cannot decode to structs.ServiceName: %w", err) + } + vip.Service = structs.PeeredServiceName{ + ServiceName: sn, + } + } + + if err := restore.ServiceVirtualIP(vip); err != nil { return err } return nil diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index b893c73bc..2b2d3e870 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -3,6 +3,7 @@ package fsm import ( "bytes" "fmt" + "net" "testing" "time" @@ -962,3 +963,66 @@ func TestFSM_BadSnapshot_NilCAConfig(t *testing.T) { require.EqualValues(t, 0, idx) require.Nil(t, config) } + +// This test asserts that ServiceVirtualIP, which made a breaking change +// in 1.13.0, can still restore from older snapshots which use the old +// state.ServiceVirtualIP type. 
+func Test_restoreServiceVirtualIP(t *testing.T) { + psn := structs.PeeredServiceName{ + ServiceName: structs.ServiceName{ + Name: "foo", + }, + } + + run := func(t *testing.T, input interface{}) { + t.Helper() + + var b []byte + buf := bytes.NewBuffer(b) + // Encode input + encoder := codec.NewEncoder(buf, structs.MsgpackHandle) + require.NoError(t, encoder.Encode(input)) + + // Create a decoder + dec := codec.NewDecoder(buf, structs.MsgpackHandle) + + logger := testutil.Logger(t) + fsm, err := New(nil, logger) + require.NoError(t, err) + + restore := fsm.State().Restore() + + // Call restore + require.NoError(t, restoreServiceVirtualIP(nil, restore, dec)) + require.NoError(t, restore.Commit()) + + ip, err := fsm.State().VirtualIPForService(psn) + require.NoError(t, err) + + // 240->224 due to addIPOffset + require.Equal(t, "224.0.0.2", ip) + } + + t.Run("new ServiceVirtualIP with PeeredServiceName", func(t *testing.T) { + run(t, state.ServiceVirtualIP{ + Service: psn, + IP: net.ParseIP("240.0.0.2"), + RaftIndex: structs.RaftIndex{}, + }) + }) + t.Run("pre-1.13.0 ServiceVirtualIP with ServiceName", func(t *testing.T) { + type compatServiceVirtualIP struct { + Service structs.ServiceName + IP net.IP + RaftIndex structs.RaftIndex + } + + run(t, compatServiceVirtualIP{ + Service: structs.ServiceName{ + Name: "foo", + }, + IP: net.ParseIP("240.0.0.2"), + RaftIndex: structs.RaftIndex{}, + }) + }) +} diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 4821b164c..22fb47ca9 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -2211,8 +2211,8 @@ type PeeredServiceName struct { } type ServiceName struct { - Name string - acl.EnterpriseMeta + Name string + acl.EnterpriseMeta `mapstructure:",squash"` } func NewServiceName(name string, entMeta *acl.EnterpriseMeta) ServiceName { From 089a9c6e5988f76220158b2d875180deab9aeb35 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Thu, 11 Aug 2022 14:40:28 -0700 Subject: [PATCH 026/104] Apply suggestions from code review Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --- website/content/docs/upgrading/upgrade-specific.mdx | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index 13bd4973c..56225fcef 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -16,8 +16,14 @@ upgrade flow. ## Consul 1.13.x -~> **Note:** If you are upgrading from `1.11`+ and have connect proxies -registered, upgrade directly to `1.13.1` instead of `1.13.0`. +### Service Mesh Compatibility +Existing Consul deployments using service mesh (i.e., containing any registered Connect proxies) +should upgrade to **at least Consul 1.13.1**. + +Consul 1.13.0 contains a bug that prevents Consul server agents from restoring saved state +on startup if the state (1) was generated before Consul 1.13 (such as during an upgrade), +and (2) contained any Connect proxy registrations. +This bug is fixed in Consul versions 1.13.1 and newer. 
### gRPC TLS From db936a3412a6e07c62f17dee69833fcce0d922aa Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Thu, 11 Aug 2022 14:43:27 -0700 Subject: [PATCH 027/104] Update with more details on 1.13.0 issue --- .../content/docs/upgrading/instructions/index.mdx | 7 +++---- .../content/docs/upgrading/upgrade-specific.mdx | 14 +++++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/website/content/docs/upgrading/instructions/index.mdx b/website/content/docs/upgrading/instructions/index.mdx index 9b93fb2dd..466acf335 100644 --- a/website/content/docs/upgrading/instructions/index.mdx +++ b/website/content/docs/upgrading/instructions/index.mdx @@ -13,12 +13,14 @@ This document is intended to help users who find themselves many versions behind ## Upgrade Path Our recommended upgrade path is to move through the following sequence of versions: + - 0.8.5 (final 0.8.x) - 1.2.4 (final 1.2.x) - 1.6.10 (final 1.6.x) - 1.8.19 (final 1.8.x) - Latest 1.10.x -- Latest current version (1.11.x or 1.12.x) +- Latest 1.12.x +- Latest 1.13.x ([at least 1.13.1](/docs/upgrading/upgrade-specific#service-mesh-compatibility)) ## Getting Started @@ -28,9 +30,6 @@ The upgrade guides will mention notable changes and link to relevant changelogs we recommend reviewing the changelog for versions between the one you are on and the one you are upgrading to at each step to familiarize yourself with changes. -~> **Note:** If you are upgrading from `1.11`+ and have connect proxies -registered, upgrade directly to `1.13.1` instead of `1.13.0`. - Select your _currently installed_ release series: - [1.9.x](/docs/upgrading/instructions/upgrade-to-1-10-x) - [1.8.x](/docs/upgrading/instructions/upgrade-to-1-10-x) diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index 56225fcef..75884adb2 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -17,12 +17,16 @@ upgrade flow. ## Consul 1.13.x ### Service Mesh Compatibility -Existing Consul deployments using service mesh (i.e., containing any registered Connect proxies) -should upgrade to **at least Consul 1.13.1**. -Consul 1.13.0 contains a bug that prevents Consul server agents from restoring saved state -on startup if the state (1) was generated before Consul 1.13 (such as during an upgrade), -and (2) contained any Connect proxy registrations. +Existing Consul deployments using service mesh (i.e., containing any registered +Connect proxies) should upgrade to **at least Consul 1.13.1**. + +Consul 1.13.0 contains a bug that prevents Consul server agents from restoring +saved state on startup if the state + +1. was generated before Consul 1.13 (such as during an upgrade), and +2. contained any Connect proxy registrations. + This bug is fixed in Consul versions 1.13.1 and newer. 
### gRPC TLS From 6dd09fbb2ecbf87efcbc730213a2546ea6dd1a6e Mon Sep 17 00:00:00 2001 From: DanStough Date: Thu, 11 Aug 2022 17:27:44 -0400 Subject: [PATCH 028/104] docs: changelog 1.12.4 and 1.11.8 --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c4769834..08b8a37a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +## 1.12.4 (August 11, 2022) + +BUG FIXES: + +* cli: when `acl token read` is used with the `-self` and `-expanded` flags, return an error instead of panicking [[GH-13787](https://github.com/hashicorp/consul/issues/13787)] +* connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. [[GH-13847](https://github.com/hashicorp/consul/issues/13847)] +* connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams. +connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams. [[GH-13958](https://github.com/hashicorp/consul/issues/13958)] +* ui: Fixes an issue where client side validation errors were not showing in certain areas [[GH-14021](https://github.com/hashicorp/consul/issues/14021)] + +## 1.11.8 (August 11, 2022) + +BUG FIXES: + +* connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. [[GH-13847](https://github.com/hashicorp/consul/issues/13847)] +* connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams. +connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams. [[GH-13958](https://github.com/hashicorp/consul/issues/13958)] + ## 1.13.0 (August 9, 2022) BREAKING CHANGES: From 690a5bf8f3f4c811492decb051d5085f9e4fd0ef Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Thu, 11 Aug 2022 16:23:02 -0700 Subject: [PATCH 029/104] Add changelog entry for peering fix (#14160) --- .changelog/14119.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/14119.txt diff --git a/.changelog/14119.txt b/.changelog/14119.txt new file mode 100644 index 000000000..f0958361b --- /dev/null +++ b/.changelog/14119.txt @@ -0,0 +1,3 @@ +```release-note:bug +connect: Fixed some spurious issues during peering establishment when a follower is dialed +``` From 76b39a54539517ab8f066c04fea8fb32668f8f9a Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Thu, 11 Aug 2022 16:24:30 -0700 Subject: [PATCH 030/104] docs: changelog for 1.13.1 (#14168) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08b8a37a7..b92ca84f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## 1.13.1 (August 11, 2022) + +BUG FIXES: + +* agent: Fixed a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)] [[GH-14149](https://github.com/hashicorp/consul/issues/14149)] +* connect: Fixed some spurious issues during peering establishment when a follower is dialed [[GH-14119](https://github.com/hashicorp/consul/issues/14119)] + ## 1.12.4 (August 11, 2022) BUG FIXES: From e7b5baa3cc5d1a73c6b336f8e893c6f4b1c24566 Mon Sep 17 00:00:00 2001 From: cskh Date: Thu, 11 Aug 2022 22:09:56 -0400 Subject: [PATCH 031/104] feat(telemetry): add labels to serf and memberlist metrics (#14161) * feat(telemetry): add labels to serf and memberlist metrics * changelog * doc update Co-authored-by: R.B. 
Boyer <4903+rboyer@users.noreply.github.com> --- .changelog/14161.txt | 3 +++ agent/consul/client_serf.go | 2 ++ agent/consul/config.go | 2 ++ agent/consul/server_oss.go | 15 +++++++++++++++ agent/consul/server_serf.go | 10 +++++++--- go.mod | 6 +++--- go.sum | 10 ++++++---- website/content/docs/agent/telemetry.mdx | 4 ++++ 8 files changed, 42 insertions(+), 10 deletions(-) create mode 100644 .changelog/14161.txt diff --git a/.changelog/14161.txt b/.changelog/14161.txt new file mode 100644 index 000000000..2926ffbe9 --- /dev/null +++ b/.changelog/14161.txt @@ -0,0 +1,3 @@ +```release-note:improvement +metrics: add labels of segment, partition, network area, network (lan or wan) to serf and memberlist metrics +``` diff --git a/agent/consul/client_serf.go b/agent/consul/client_serf.go index 55df7a547..05db21e2f 100644 --- a/agent/consul/client_serf.go +++ b/agent/consul/client_serf.go @@ -62,6 +62,8 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) ( return nil, err } + addSerfMetricsLabels(conf, false, "", "", "") + addEnterpriseSerfTags(conf.Tags, c.config.AgentEnterpriseMeta()) conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(c.logger) diff --git a/agent/consul/config.go b/agent/consul/config.go index b897c4f23..38063f808 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -584,6 +584,7 @@ func CloneSerfLANConfig(base *serf.Config) *serf.Config { cfg.MemberlistConfig.ProbeTimeout = base.MemberlistConfig.ProbeTimeout cfg.MemberlistConfig.SuspicionMult = base.MemberlistConfig.SuspicionMult cfg.MemberlistConfig.RetransmitMult = base.MemberlistConfig.RetransmitMult + cfg.MemberlistConfig.MetricLabels = base.MemberlistConfig.MetricLabels // agent/keyring.go cfg.MemberlistConfig.Keyring = base.MemberlistConfig.Keyring @@ -593,6 +594,7 @@ func CloneSerfLANConfig(base *serf.Config) *serf.Config { cfg.ReapInterval = base.ReapInterval cfg.TombstoneTimeout = base.TombstoneTimeout cfg.MemberlistConfig.SecretKey = base.MemberlistConfig.SecretKey + cfg.MetricLabels = base.MetricLabels return cfg } diff --git a/agent/consul/server_oss.go b/agent/consul/server_oss.go index 5ae2fc3ea..4ae524b65 100644 --- a/agent/consul/server_oss.go +++ b/agent/consul/server_oss.go @@ -159,3 +159,18 @@ func (s *Server) addEnterpriseStats(stats map[string]map[string]string) { func getSerfMemberEnterpriseMeta(member serf.Member) *acl.EnterpriseMeta { return structs.NodeEnterpriseMetaInDefaultPartition() } + +func addSerfMetricsLabels(conf *serf.Config, wan bool, segment string, partition string, areaID string) { + conf.MetricLabels = []metrics.Label{} + + networkMetric := metrics.Label{ + Name: "network", + } + if wan { + networkMetric.Value = "wan" + } else { + networkMetric.Value = "lan" + } + + conf.MetricLabels = append(conf.MetricLabels, networkMetric) +} diff --git a/agent/consul/server_serf.go b/agent/consul/server_serf.go index 5e29b47dd..b9c8ad95f 100644 --- a/agent/consul/server_serf.go +++ b/agent/consul/server_serf.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/memberlist" "github.com/hashicorp/raft" @@ -177,9 +178,10 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) { if opts.WAN { nt, err := memberlist.NewNetTransport(&memberlist.NetTransportConfig{ - BindAddrs: []string{conf.MemberlistConfig.BindAddr}, - BindPort: conf.MemberlistConfig.BindPort, - Logger: conf.MemberlistConfig.Logger, + BindAddrs: 
[]string{conf.MemberlistConfig.BindAddr}, + BindPort: conf.MemberlistConfig.BindPort, + Logger: conf.MemberlistConfig.Logger, + MetricLabels: []metrics.Label{{Name: "network", Value: "wan"}}, }) if err != nil { return nil, err @@ -230,6 +232,8 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) { conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(s.logger) + addSerfMetricsLabels(conf, opts.WAN, "", "", "") + addEnterpriseSerfTags(conf.Tags, s.config.AgentEnterpriseMeta()) if s.config.OverrideInitialSerfTags != nil { diff --git a/go.mod b/go.mod index cb048763d..e2fbafed4 100644 --- a/go.mod +++ b/go.mod @@ -45,11 +45,11 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 - github.com/hashicorp/memberlist v0.3.1 + github.com/hashicorp/memberlist v0.4.0 github.com/hashicorp/raft v1.3.9 github.com/hashicorp/raft-autopilot v0.1.6 github.com/hashicorp/raft-boltdb/v2 v2.2.2 - github.com/hashicorp/serf v0.9.8 + github.com/hashicorp/serf v0.10.0 github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493 @@ -77,7 +77,7 @@ require ( golang.org/x/net v0.0.0-20211216030914-fe4d6282115f golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad + golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb google.golang.org/grpc v1.37.1 diff --git a/go.sum b/go.sum index 8f2afaa45..ee3e0beda 100644 --- a/go.sum +++ b/go.sum @@ -364,8 +364,9 @@ github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.4.0 h1:k3uda5gZcltmafuFF+UFqNEl5PrH+yPZ4zkjp1f/H/8= +github.com/hashicorp/memberlist v0.4.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= @@ -380,8 +381,8 @@ github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wc github.com/hashicorp/raft-boltdb/v2 v2.2.2 h1:rlkPtOllgIcKLxVT4nutqlTH2NRFn+tO1wwZk/4Dxqw= github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo= -github.com/hashicorp/serf v0.9.8/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.10.0 h1:89qvvpfMQnz6c2y4pv7j2vUUmeT1+5TSZMexuTbtsPs= +github.com/hashicorp/serf v0.10.0/go.mod h1:bXN03oZc5xlH46k/K1qTrpXb9ERKyY1/i/N5mxvgrZw= 
github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 h1:OKsyxKi2sNmqm1Gv93adf2AID2FOBFdCbbZn9fGtIdg= github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 h1:e1ok06zGrWJW91rzRroyl5nRNqraaBe4d5hiKcVZuHM= @@ -793,8 +794,9 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 575f3d7e5..8b4f92343 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -605,6 +605,10 @@ Any metric in this section can be turned off with the [`prefix_filter`](/docs/ag ## Cluster Health These metrics give insight into the health of the cluster as a whole. +Query for the `consul.memberlist.*` and `consul.serf.*` metrics can be appended +with certain labels to further distinguish data between different gossip pools. +The supported label for OSS is `network`, while `segment`, `partition`, `area` +are allowed for . | Metric | Description | Unit | Type | |----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------|---------| From f9f484853290600f94f00f058e77e053972d6194 Mon Sep 17 00:00:00 2001 From: trujillo-adam Date: Thu, 11 Aug 2022 20:10:36 -0700 Subject: [PATCH 032/104] updated Routes configuration ref --- .../docs/api-gateway/configuration/routes.mdx | 107 +++++++++++++++++- 1 file changed, 104 insertions(+), 3 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/routes.mdx b/website/content/docs/api-gateway/configuration/routes.mdx index 7819d482f..199dbff6d 100644 --- a/website/content/docs/api-gateway/configuration/routes.mdx +++ b/website/content/docs/api-gateway/configuration/routes.mdx @@ -7,7 +7,9 @@ description: >- # Route -Routes are independent configuration objects that are associated with specific listeners. +This topic describes how to create and configure `Route` resources. 
Routes are independent configuration objects that are associated with specific listeners. + +## Create a `Route` Declare a route with either `kind: HTTPRoute` or `kind: TCPRoute` and configure the route parameters in the `spec` block. Refer to the Kubernetes Gateway API documentation for each object type for details: @@ -36,8 +38,55 @@ The following example creates a route named `example-route` associated with a li -To create a route for a `backendRef` in a different namespace, you must also -create a [ReferencePolicy](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy). +## Configuration model + +The following outline shows how to format the configurations for the `Route` object. The top-level `spec` field is the root for all configurations. Click on a property name to view details about the configuration. + +## Specification + +This topic provides details about the configuration parameters. + +### parentRefs + +This field contains the list of `Gateways` that the route should attach to. If not set, the route will not attach to a `Gateway`. + +* Type: List of objects +* Required: Required + +### parentRefs.name + +This field specifies the name of the `Gateway` the route is attached to. + +* Type: String +* Required: Required + +### ParentRefs.namespace + +This field specifies the Kubernetes namespace containing the `Gateway` to attach to. It is optional if the `Gateway` is in the same Kubernetes namespace as the `Route`. If the `Gateway` is in a different namespace, then a value must be provided. + +* Type: String +* Required: Optional + +### rules + +The `rules` field specifies how traffic passing through the route should behave. It contains several possible parameters to customize traffic behavior. + +* Type: List of objects +* Required: Required + +### rules.backendRefs + +This field specifies backend services that the `Route` references. The following table describes the parameters for `backendRefs`: + +| Parameter | Description | Type | Required | +| --- | --- | --- | --- | +| `group` | Specifies the Kubernetes API Group of the referenced backend. You can specify the following values:
  • `""`: Specifies the core Kubernetes API group. This value must be used when `kind` is set to `Service`. This is the default value if unspecified.
  • `api-gateway.consul.hashicorp.com`: This value must be used when `kind` is set to `MeshService`.
| String | Optional | +| `kind` | Specifies the Kubernetes Kind of the referenced backend. You can specify the following values:
  • `Service`: Indicates that the `backendRef` references a Service in the Kubernetes cluster. This is the default value if unspecified.
  • `MeshService`: Indicates that the `backendRef` references a service in the Consul mesh.
| String | Optional | +| `name` | Specifies the name of the Kubernetes Service or Consul mesh service resource. | String | Required | +| `namespace` | Specifies the Kubernetes namespace containing the Kubernetes Service or Consul mesh service resource. To create a route for a `backendRef` in a different namespace, you must also create a [ReferencePolicy](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy). Refer to the [example route](#example-cross-namespace-backendref) configured to reference across namespaces. | String | Optional | +| `port` | Specifies the port number for accessing the Kubernetes or Consul service. | Integer | Required | + +#### Example cross-namespace backendRef The following example creates a route named `example-route` in namespace `gateway-namespace`. This route has a `backendRef` in namespace `service-namespace`. Traffic is allowed because the `ReferencePolicy`, named `reference-policy` in namespace `service-namespace`, allows traffic from `HTTPRoutes` in `gateway-namespace` to `Services` in `service-namespace`. @@ -78,6 +127,57 @@ The following example creates a route named `example-route` in namespace `gatewa
+### rules.filters + +The `filters` block defines steps for processing requests. You can configure filters to modify the properties of matching incoming requests and enable Consul API Gateway features, such as rewriting path prefixes (refer to [Rerouting HTTP](/docs/api-gateway/usage#rerouting-http-requests) for additional information). + +* Type: Array of objects +* Required: Optional + +### rules.filters.type + +Specifies the type of filter you want to apply to the route. The parameter is optional and takes a string value. + +You can specify the following values: + +* `RequestHeaderModifier`: The `RequestHeaderModifier` type modifies the HTTP headers on the incoming request. +* `URLRewrite`: The `URLRewrite` type modifies the URL path on the incoming request. + +### rules.filters.requestHeaderModifier + +Contains a list of header configuration objects for `requestHeaderModifier` filters when `rules.filters.type` is configured to `RequestHeaderModifier`. + +### rules.filters.urlRewrite + +Contains a list of path configuration objects for `urlRewrite` filters when `rules.filters.type` is configured to `URLRewrite`. + +### rules.filters.urlRewrite.path + +Specifies a list of objects that determine how Consul API Gateway rewrites URL paths (refer to [Rerouting HTTP](/docs/api-gateway/usage#rerouting-http-requests) for additional information). + +The following table describes the parameters for `path`: + +| Parameter | Description | Type | Required | +| --- | --- | --- | --- | +| `replacePrefixMatch` | Specifies the path prefix to use as the replacement when rerouting requests. | String | Required | +| `type` | Specifies the type of rewrite rule. You can specify the following values:
  • `ReplacePrefixMatch`
| String | Optional | + +### rules.matches + +Specifies rules for matching incoming requests. You can apply [`filters`](#rulesfilters) to requests that match the defined rules. + +### rules.matches.path + +Specifies a list of objects that define pattern-matching rules. Consul API Gateway processes matching requests according to the rules configured for the routes. + +The following table describes the parameters for `path`: + +| Parameter | Description | Type | Required | +| --- | --- | --- | --- | +| `type` | | String | Required | +| `value` | | String | Required | + + \ No newline at end of file From 9dfc04a8833fda285acdbae20be587ec0d0c191f Mon Sep 17 00:00:00 2001 From: trujillo-adam Date: Thu, 11 Aug 2022 20:19:51 -0700 Subject: [PATCH 033/104] added usage docs for prefix rewrite --- website/content/docs/api-gateway/usage.mdx | 54 ++++++++++++++++++++-- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/website/content/docs/api-gateway/usage.mdx b/website/content/docs/api-gateway/usage.mdx index e89c0556f..5ba7cfc80 100644 --- a/website/content/docs/api-gateway/usage.mdx +++ b/website/content/docs/api-gateway/usage.mdx @@ -2,13 +2,17 @@ layout: docs page_title: Consul API Gateway Basic Usage description: >- - Consul API Gateway Basic Usage + This topic describes how to use Consul API Gateway. --- -# Basic Usage +# Usage -This topic describes the basic workflow for implementing Consul API Gateway configurations. +This topic describes how to use Consul API Gateway. + +## Basic usage + +Complete the following steps to use Consul API Gateway in your network. 1. Verify that the [requirements](/docs/api-gateway/tech-specs) have been met. 1. Verify that the Consul API Gateway CRDs and controller have been installed and applied (see [Installation](/docs/api-gateway/consul-api-gateway-install)). @@ -30,6 +34,50 @@ This topic describes the basic workflow for implementing Consul API Gateway conf $ kubectl apply -f gateway.yaml routes.yaml ``` +## Reroute HTTP requests + +Configure the following fields in your `Route` configuration to use this feature. Refer to the [Route configuration reference](/docs/api-gateway/configuration/routes) for details about the parameters. + +* [`rules.filters.type`](/docs/api-gateway/configuration/routes#rules-filters-type): Set this parameter to `URLRewrite` to instruct Consul API Gateway to rewrite the URL when specific conditions are met. +* [`rules.filters.urlRewrite`](/docs/api-gateway/configuration/routes#rules-filters-urlrewrite): Specify the `path` configuration. +* [`rules.filters.urlRewrite.path`](/docs/api-gateway/configuration/routes#rules-filters-urlrewrite-path): Contains the paths that incoming requests should be rewritten to based on the match conditions. + +Note that if the route is configured to accept paths with and without a trailing slash, you must make two separate routes to handle each case. + +### Example + +In the following example, requests to` /incoming-request-prefix/` are forwarded to the `backendRef` as `/prefix-backend-receives/`. A request to `/incoming-request-prefix/request-path`, for instance, is received by the `backendRef` as `/prefix-backend-receives/request-path`. + + + +```yaml hideClipboard +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: example-route + ##... +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: api-gateway + rules: + - backendRefs: + . . . 
+ filters: + - type: URLRewrite + urlRewrite: + path: + replacePrefixMatch: /prefix-backend-receives/ + type: ReplacePrefixMatch + matches: + - path: + type: PathPrefix + value: /incoming–request-prefix/ +``` + + + \ No newline at end of file From 4104086ca980d1288c3a6aeb26dc356127c0f158 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 15 Aug 2022 11:49:41 -0400 Subject: [PATCH 049/104] Add missing code block --- website/content/docs/api-gateway/configuration/meshservice.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/api-gateway/configuration/meshservice.mdx b/website/content/docs/api-gateway/configuration/meshservice.mdx index 49c6621fa..ef085f2f5 100644 --- a/website/content/docs/api-gateway/configuration/meshservice.mdx +++ b/website/content/docs/api-gateway/configuration/meshservice.mdx @@ -7,7 +7,7 @@ description: >- # MeshService -This topic provides full details about the MeshService resource. +This topic provides full details about the `MeshService` resource. ## Introduction From 8f64ef73e36c05dc8488b1f929a08e9ca5c8c5a5 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 15 Aug 2022 11:56:54 -0400 Subject: [PATCH 050/104] Update ReferencePolicy -> ReferenceGrant --- .../content/docs/api-gateway/configuration/gateway.mdx | 2 +- .../content/docs/api-gateway/configuration/routes.mdx | 10 +++++----- website/content/docs/api-gateway/index.mdx | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index 94b480845..4d07c8e98 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -159,7 +159,7 @@ Specifies the `tls` configurations for the `Gateway`. The `tls` object is requir | Parameter | Description | Type | Required | | --- | --- | --- | --- | -| `certificateRefs` |
Specifies Kubernetes `name` and `namespace` objects that contains TLS certificates and private keys.
The certificates establish a TLS handshake for requests that match the `hostname` of the associated `listener`. Each reference must be a Kubernetes Secret. If you are using a Secret in a namespace other than the `Gateway`'s, each reference must also have a corresponding [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy).
| Object or array | Required if `tls` is set | +| `certificateRefs` |
Specifies Kubernetes `name` and `namespace` objects that contain TLS certificates and private keys.<br/>
The certificates establish a TLS handshake for requests that match the `hostname` of the associated `listener`. Each reference must be a Kubernetes Secret. If you are using a Secret in a namespace other than the `Gateway`'s, each reference must also have a corresponding [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant).
| Object or array | Required if `tls` is set | | `mode` | Specifies the TLS Mode. Should always be set to `Terminate` for `HTTPRoutes` | string | Required if `certificateRefs` is set | | `options` | Specifies additional Consul API Gateway options. | Map of strings | optional | diff --git a/website/content/docs/api-gateway/configuration/routes.mdx b/website/content/docs/api-gateway/configuration/routes.mdx index 7819d482f..3822cf0f4 100644 --- a/website/content/docs/api-gateway/configuration/routes.mdx +++ b/website/content/docs/api-gateway/configuration/routes.mdx @@ -37,11 +37,11 @@ The following example creates a route named `example-route` associated with a li To create a route for a `backendRef` in a different namespace, you must also -create a [ReferencePolicy](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy). +create a [ReferenceGrant](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant). -The following example creates a route named `example-route` in namespace `gateway-namespace`. This route has a `backendRef` in namespace `service-namespace`. Traffic is allowed because the `ReferencePolicy`, named `reference-policy` in namespace `service-namespace`, allows traffic from `HTTPRoutes` in `gateway-namespace` to `Services` in `service-namespace`. +The following example creates a route named `example-route` in namespace `gateway-namespace`. This route has a `backendRef` in namespace `service-namespace`. Traffic is allowed because the `ReferenceGrant`, named `reference-grant` in namespace `service-namespace`, allows traffic from `HTTPRoutes` in `gateway-namespace` to `Services` in `service-namespace`. - + ```yaml apiVersion: gateway.networking.k8s.io/v1alpha2 @@ -61,9 +61,9 @@ The following example creates a route named `example-route` in namespace `gatewa --- apiVersion: gateway.networking.k8s.io/v1alpha2 - kind: ReferencePolicy + kind: ReferenceGrant metadata: - name: reference-policy + name: reference-grant namespace: service-namespace spec: from: diff --git a/website/content/docs/api-gateway/index.mdx b/website/content/docs/api-gateway/index.mdx index 2c32c5abd..6a811fd71 100644 --- a/website/content/docs/api-gateway/index.mdx +++ b/website/content/docs/api-gateway/index.mdx @@ -38,7 +38,7 @@ are used, see the [documentation in our GitHub repo](https://github.com/hashicor | [`Gateway`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.Gateway) |
  • Supported protocols: `HTTP`, `HTTPS`, `TCP`
  • Header-based hostname matching (no SNI support)
  • Supported filters: header addition, removal, and setting
  • TLS modes supported: `terminate`
  • Certificate types supported: `core/v1/Secret`
  • Extended options: TLS version and cipher constraints
| | [`HTTPRoute`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.HTTPRoute) |
  • Weight-based load balancing
  • Supported rules: path, header, query, and method-based matching
  • Supported filters: header addition, removal, and setting
  • Supported backend types:
    1. `core/v1/Service` (must map to a registered Consul service)
    2. `api-gateway.consul.hashicorp.com/v1alpha1/MeshService`
| | [`TCPRoute`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute) |
  • Supported backend types:
    1. `core/v1/Service` (must map to a registered Consul service)
    2. `api-gateway.consul.hashicorp.com/v1alpha1/MeshService`
| -| [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) |
  • Required to allow any reference from a `Gateway` to a Kubernetes `core/v1/Secret` in a different namespace.
    • A Gateway with an unpermitted `certificateRefs` caused by the lack of a` ReferencePolicy` sets a `ResolvedRefs` status to `False` with the reason `InvalidCertificateRef`. The Gateway will not become ready in this case.
  • Required to allow any reference from an `HTTPRoute` or `TCPRoute` to a Kubernetes `core/v1/Service` in a different namespace.
    • A route with an unpermitted `backendRefs` caused by the lack of a `ReferencePolicy` sets a `ResolvedRefs` status to `False` with the reason `RefNotPermitted`. The gateway listener rejects routes with an unpermitted `backendRefs`.
    • WARNING: If a route `backendRefs` becomes unpermitted, the entire route is removed from the gateway listener.
      • A `backendRefs` can become unpermitted when you delete a `ReferencePolicy` or add a new unpermitted `backendRefs` to an existing route.
| +| [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant) |
  • Required to allow any reference from a `Gateway` to a Kubernetes `core/v1/Secret` in a different namespace.
    • A Gateway with an unpermitted `certificateRefs` caused by the lack of a `ReferenceGrant` sets a `ResolvedRefs` status to `False` with the reason `InvalidCertificateRef`. The Gateway will not become ready in this case.
  • Required to allow any reference from an `HTTPRoute` or `TCPRoute` to a Kubernetes `core/v1/Service` in a different namespace.
    • A route with an unpermitted `backendRefs` caused by the lack of a `ReferenceGrant` sets a `ResolvedRefs` status to `False` with the reason `RefNotPermitted`. The gateway listener rejects routes with an unpermitted `backendRefs`.
    • WARNING: If a route's `backendRefs` becomes unpermitted, the entire route is removed from the gateway listener.
      • A `backendRefs` entry can become unpermitted when you delete a `ReferenceGrant` or add a new unpermitted `backendRefs` entry to an existing route.
| ## Additional Resources From ceb5afc408246e00693ac94cc70706b9bd2adfbb Mon Sep 17 00:00:00 2001 From: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:02:46 -0700 Subject: [PATCH 051/104] Apply suggestions from code review Co-authored-by: Nathan Coleman --- .../content/docs/api-gateway/configuration/meshservice.mdx | 2 +- website/content/docs/api-gateway/configuration/routes.mdx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/meshservice.mdx b/website/content/docs/api-gateway/configuration/meshservice.mdx index ef085f2f5..0307b3a4b 100644 --- a/website/content/docs/api-gateway/configuration/meshservice.mdx +++ b/website/content/docs/api-gateway/configuration/meshservice.mdx @@ -11,7 +11,7 @@ This topic provides full details about the `MeshService` resource. ## Introduction -A `MeshService` is a resource in the Kubernetes cluster that represents a service in the Consul service mesh outside the Kubernetes cluster where Consul API Gateway is deployed. The service must be in the same Consul datacenter. The MeshService exists so that other configuration models in Kubernetes, such as HTTPRoute and TCPRoute, can reference services that only exist in Consul. +A `MeshService` is a resource in the Kubernetes cluster that enables Kubernetes configuration models, such as `HTTPRoute` and `TCPRoute`, to reference services that only exist in Consul. A `MeshService` represents a service in the Consul service mesh outside the Kubernetes cluster where Consul API Gateway is deployed. The service represented by the `MeshService` resource must be in the same Consul datacenter as the Kubernetes cluster. ## Configuration Model diff --git a/website/content/docs/api-gateway/configuration/routes.mdx b/website/content/docs/api-gateway/configuration/routes.mdx index 58a07758d..acce1d68e 100644 --- a/website/content/docs/api-gateway/configuration/routes.mdx +++ b/website/content/docs/api-gateway/configuration/routes.mdx @@ -220,7 +220,7 @@ Specifies rules for matching incoming requests. You can apply [`filters`](#rules * [headers](#rules-matches-headers) * [query parameters](#rules-matches-queryparams) * [request method](#rules-matches-method) - +Each rule matches requests independently. As a result, a request matching any of the conditions is considered a match. You can configure several matching rules for each type to widen or narrow matches. ### rules.matches.path Specifies a list of objects that define matches based on URL path. The following table describes the parameters for the `path` field: @@ -228,7 +228,7 @@ Specifies a list of objects that define matches based on URL path. The following | Parameter | Description | Type | Required | | --- | --- | --- | --- | | `type` | Specifies the type of comparison to use for matching the path value. You can specify the following types.
  • `Exact`: Returns a match only when the entire path matches the `value` field (default).
  • `PathPrefix`: Returns a match when the path matches the regex defined in the `value` field.
| String | Required | -| `value` | Specifies value to match on. You can specify a specific string or a regular expression. | String | Required | +| `value` | Specifies the value to match on. You can specify a specific string when `type` is `Exact` or `PathPrefix`. You can specify a regular expression if `type` is `RegularExpression`. | String | Required | ### rules.matches.headers From 1ce15b3044d3ebb7b4ea28c390402b07ca22755c Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 15 Aug 2022 17:13:44 -0400 Subject: [PATCH 052/104] Apply suggestions from code review --- website/content/docs/api-gateway/configuration/routes.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/content/docs/api-gateway/configuration/routes.mdx b/website/content/docs/api-gateway/configuration/routes.mdx index acce1d68e..fc41f85d5 100644 --- a/website/content/docs/api-gateway/configuration/routes.mdx +++ b/website/content/docs/api-gateway/configuration/routes.mdx @@ -220,6 +220,7 @@ Specifies rules for matching incoming requests. You can apply [`filters`](#rules * [headers](#rules-matches-headers) * [query parameters](#rules-matches-queryparams) * [request method](#rules-matches-method) + Each rule matches requests independently. As a result, a request matching any of the conditions is considered a match. You can configure several matching rules for each type to widen or narrow matches. ### rules.matches.path @@ -227,7 +228,7 @@ Specifies a list of objects that define matches based on URL path. The following | Parameter | Description | Type | Required | | --- | --- | --- | --- | -| `type` | Specifies the type of comparison to use for matching the path value. You can specify the following types.
  • `Exact`: Returns a match only when the entire path matches the `value` field (default).
  • `PathPrefix`: Returns a match when the path matches the regex defined in the `value` field.
| String | Required | +| `type` | Specifies the type of comparison to use for matching the path value. You can specify the following types.
  • `Exact`: Returns a match only when the entire path matches the `value` field (default).
  • `PathPrefix`: Returns a match when the path has the prefix defined in the `value` field.
  • `RegularExpression`: Returns a match when the path matches the regex defined in the `value` field.
| String | Required | | `value` | Specifies the value to match on. You can specify a specific string when `type` is `Exact` or `PathPrefix`. You can specify a regular expression if `type` is `RegularExpression`. | String | Required | ### rules.matches.headers From 64695eca79974b974395d411183b5e5fb1c8446a Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 15 Aug 2022 17:21:36 -0400 Subject: [PATCH 053/104] Fix typo --- website/content/docs/api-gateway/configuration/routes.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/api-gateway/configuration/routes.mdx b/website/content/docs/api-gateway/configuration/routes.mdx index fc41f85d5..dced0cfa0 100644 --- a/website/content/docs/api-gateway/configuration/routes.mdx +++ b/website/content/docs/api-gateway/configuration/routes.mdx @@ -191,7 +191,7 @@ Defines operations to perform on matching request headers when `rules.filters.ty | --- | --- | --- | --- | | `set` | Configure this field to rewrite the HTTP request header. It specifies the name of an HTTP header to overwrite and the new value to set. Any existing values associated with the header name are overwritten. You can specify the following configurations:
  • `name`: Required string that specifies the name of the HTTP header to set.
  • `value`: Required string that specifies the value of the HTTP header to set.
| List of objects | Optional | | `add` | Configure this field to append the request header with a new value. It specifies the name of an HTTP header to append and the value(s) to add. You can specify the following configurations:
  • `name`: Required string that specifies the name of the HTTP header to append.
  • `value`: Required string that specifies the value of the HTTP header to add.
| List of objects | Optional | -| `remove` | Configure this field to specifify an array of header names to remove from the request header. | Array of strings | Optional | +| `remove` | Configure this field to specify an array of header names to remove from the request header. | Array of strings | Optional | ### rules.filters.urlRewrite From f06d4f234de9c9006217fda630096510a2262b1f Mon Sep 17 00:00:00 2001 From: sarahalsmiller <100602640+sarahalsmiller@users.noreply.github.com> Date: Mon, 15 Aug 2022 17:52:47 -0500 Subject: [PATCH 054/104] Update redirects.js --- website/redirects.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/website/redirects.js b/website/redirects.js index 52a313b04..583b2a5d1 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1265,7 +1265,12 @@ module.exports = [ }, { source: '/docs/api-gateway/api-gateway-usage', - destination: '/docs/api-gateway/consul-api-gateway-install', + destination: '/docs/api-gateway/install', + permanent: true, + }, + { + source: '/docs/api-gateway/api-gateway/consul-api-gateway-install', + destination: '/docs/api-gateway/install', permanent: true, }, { From a95e0121a625544a2e84c9bbaae613b055a080ff Mon Sep 17 00:00:00 2001 From: sarahalsmiller <100602640+sarahalsmiller@users.noreply.github.com> Date: Mon, 15 Aug 2022 18:00:08 -0500 Subject: [PATCH 055/104] Update redirects.js --- website/redirects.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/redirects.js b/website/redirects.js index 583b2a5d1..a4b3f272b 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1269,7 +1269,7 @@ module.exports = [ permanent: true, }, { - source: '/docs/api-gateway/api-gateway/consul-api-gateway-install', + source: '/docs/api-gateway/consul-api-gateway-install', destination: '/docs/api-gateway/install', permanent: true, }, From 0314d7cbbbc8ad2c118b6d8634fb2634a884c789 Mon Sep 17 00:00:00 2001 From: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> Date: Tue, 16 Aug 2022 09:26:02 -0700 Subject: [PATCH 056/104] Update website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx --- .../content/docs/release-notes/consul-api-gateway/v0_4_x.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx index c14f99559..382e88284 100644 --- a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx +++ b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx @@ -34,7 +34,7 @@ description: >- `//cart/checkout`. Please see the product documentation for details on how to configure this feature. -## What's Changed +## What Has Changed - **Reference Policy Renamed to Reference Grant** In v0.5.0 of the Kubernetes Gateway API, `ReferencePolicy` has been renamed to `ReferenceGrant`. 
This From 5c6bfee65de9381e42209d49d12115feb2eadb32 Mon Sep 17 00:00:00 2001 From: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> Date: Tue, 16 Aug 2022 09:31:46 -0700 Subject: [PATCH 057/104] Minor edits to Release Notes Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- .../docs/release-notes/consul-api-gateway/v0_4_x.mdx | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx index 382e88284..3512cb0d9 100644 --- a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx +++ b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx @@ -27,12 +27,8 @@ description: >- - **URL Path Prefix Rewrite** This release introduces support for rewriting a URL's path prefix when routing - HTTP traffic. This is configured by adding a `URLRewrite` filter to a - `HTTPRoute`. With this feature, the gateway can rewrite the URL path, in a - client's HTTP Request, before sending the request to a service. A simple - example of this is changing the path from `//store/checkout` to - `//cart/checkout`. Please see the product documentation for details on how to - configure this feature. + HTTP traffic. To use this functionality, add a `URLRewrite` filter to an + `HTTPRoute` configuration. This enables the gateway to rewrite the URL path in a client's HTTP request before sending the request to a service. For example, you could configure the gateway to change the path from `//store/checkout` to `//cart/checkout`. Refer to the [usage documentation](/docs/api-gateway/usage) for additional information. ## What Has Changed From 9f2798a5bf2bd091cb4e55051f015a433b4bf6cc Mon Sep 17 00:00:00 2001 From: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> Date: Tue, 16 Aug 2022 09:36:23 -0700 Subject: [PATCH 058/104] Minor edits to Release Notes --- .../content/docs/release-notes/consul-api-gateway/v0_4_x.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx index 3512cb0d9..4f440dc24 100644 --- a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx +++ b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx @@ -38,7 +38,7 @@ description: >- in a future version of the standard. After upgrading to this version of Consul API Gateway, you should rename all - existing `ReferencePolicy`y to `ReferenceGrant`s. Please see the upgrading + existing `ReferencePolicy`y to `ReferenceGrant`s. Refer to the [Upgrades](/docs/api-gateway/upgrades) instructions for additional details. ## Supported Software From f98380ab558ea96821ce3db92893fef39824d662 Mon Sep 17 00:00:00 2001 From: Jeff-Apple <79924108+Jeff-Apple@users.noreply.github.com> Date: Tue, 16 Aug 2022 10:48:13 -0700 Subject: [PATCH 059/104] Added Known Issues and other edits to Rel Notes --- .../consul-api-gateway/v0_4_x.mdx | 25 +++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx index 4f440dc24..b0c336af7 100644 --- a/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx +++ b/website/content/docs/release-notes/consul-api-gateway/v0_4_x.mdx @@ -25,12 +25,15 @@ description: >- API. 
Once an API reaches `v1beta1` status, future versions must comply with several backward compatibility requirements. -- **URL Path Prefix Rewrite** - This release introduces support for rewriting a URL's path prefix when routing - HTTP traffic. To use this functionality, add a `URLRewrite` filter to an - `HTTPRoute` configuration. This enables the gateway to rewrite the URL path in a client's HTTP request before sending the request to a service. For example, you could configure the gateway to change the path from `//store/checkout` to `//cart/checkout`. Refer to the [usage documentation](/docs/api-gateway/usage) for additional information. +- **URL Path Prefix Rewrite** This release introduces support for rewriting a + URL's path prefix when routing HTTP traffic. To use this functionality, add a + `URLRewrite` filter to an `HTTPRoute` configuration. This enables the gateway + to rewrite the URL path in a client's HTTP request before sending the request + to a service. For example, you could configure the gateway to change the path + from `//store/checkout` to `//cart/checkout`. Refer to the [usage + documentation](/docs/api-gateway/usage) for additional information. -## What Has Changed +## What has Changed - **Reference Policy Renamed to Reference Grant** In v0.5.0 of the Kubernetes Gateway API, `ReferencePolicy` has been renamed to `ReferenceGrant`. This @@ -38,13 +41,13 @@ description: >- in a future version of the standard. After upgrading to this version of Consul API Gateway, you should rename all - existing `ReferencePolicy`y to `ReferenceGrant`s. Refer to the [Upgrades](/docs/api-gateway/upgrades) + existing `ReferencePolicy` to `ReferenceGrant`. Refer to the [Upgrades](/docs/api-gateway/upgrades) instructions for additional details. ## Supported Software - Consul 1.11.2+ -- HashiCorp Consul Helm chart 0.47.0+ +- HashiCorp Consul Helm chart 0.47.1+ - Kubernetes 1.21+ - Kubernetes 1.24 is not supported at this time. - Kubectl 1.21+ @@ -59,6 +62,14 @@ Supported version of the [Gateway API](https://gateway-api.sigs.k8s.io/) spec: v For detailed information on upgrading, please refer to the [Upgrades page](/docs/api-gateway/upgrades) +## Known Issues +The following issues are know to exist in the v0.4.0 release + +- API Gateway pods fail to start if namespace mirroring enabled and destination + namespace doesn't exist. See GitHub Issue + [#248](https://github.com/hashicorp/consul-api-gateway/issues/248) for + details. + ## Changelogs The changelogs for this major release version and any maintenance versions are listed below. From 14494d84e04f3b6313c1349195a4b5c681962acd Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Tue, 16 Aug 2022 15:33:33 -0700 Subject: [PATCH 060/104] ci: Replace Nomad integration tests with predictable compatibility matrix (#14220) --- .circleci/config.yml | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index af1a2f5c6..60d6c3413 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,6 +28,10 @@ references: - "1.21.4" - "1.22.2" - "1.23.0" + nomad-versions: &supported_nomad_versions + - &default_nomad_version "1.3.3" + - "1.2.10" + - "1.1.16" images: # When updating the Go version, remember to also update the versions in the # workflows section for go-test-lib jobs. 
@@ -560,17 +564,20 @@ jobs: - run: make ci.dev-docker - run: *notify-slack-failure - # Nomad 0.8 builds on go1.10 - # Run integration tests on nomad/v0.8.7 - nomad-integration-0_8: + nomad-integration-test: &NOMAD_TESTS docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.10 + - image: docker.mirror.hashicorp.services/cimg/go:1.19 + parameters: + nomad-version: + type: enum + enum: *supported_nomad_versions + default: *default_nomad_version environment: <<: *ENVIRONMENT NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /home/circleci/go/src/github.com/hashicorp/nomad - NOMAD_VERSION: v0.8.7 + NOMAD_VERSION: << parameters.nomad-version >> steps: &NOMAD_INTEGRATION_TEST_STEPS - - run: git clone https://github.com/hashicorp/nomad.git --branch ${NOMAD_VERSION} ${NOMAD_WORKING_DIR} + - run: git clone https://github.com/hashicorp/nomad.git --branch v${NOMAD_VERSION} ${NOMAD_WORKING_DIR} # get consul binary - attach_workspace: @@ -601,16 +608,6 @@ jobs: path: *TEST_RESULTS_DIR - run: *notify-slack-failure - # run integration tests on nomad/main - nomad-integration-main: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.18 - environment: - <<: *ENVIRONMENT - NOMAD_WORKING_DIR: /home/circleci/go/src/github.com/hashicorp/nomad - NOMAD_VERSION: main - steps: *NOMAD_INTEGRATION_TEST_STEPS - # build frontend yarn cache frontend-cache: docker: @@ -1117,12 +1114,12 @@ workflows: - dev-upload-docker: <<: *dev-upload context: consul-ci - - nomad-integration-main: - requires: - - dev-build - - nomad-integration-0_8: + - nomad-integration-test: requires: - dev-build + matrix: + parameters: + nomad-version: *supported_nomad_versions - envoy-integration-test: requires: - dev-build From 149f6a610dde4ed66b3d4d56f246e661d2108369 Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Tue, 16 Aug 2022 23:08:09 -0400 Subject: [PATCH 061/104] docs: fix broken markdown --- website/content/docs/agent/config/config-files.mdx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 5c4f7b909..da17549b0 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -1998,7 +1998,7 @@ specially crafted certificate signed by the CA can be used to gain full access t Certificate Authority from the [`ca_file`](#tls_defaults_ca_file) or [`ca_path`](#tls_defaults_ca_path). By default, this is false, and Consul will not make use of TLS for outgoing connections. This applies to clients - and servers as both will make outgoing connections. This setting *does not* + and servers as both will make outgoing connections. This setting does not apply to the gRPC interface as Consul makes no outgoing connections on this interface. @@ -2071,7 +2071,9 @@ specially crafted certificate signed by the CA can be used to gain full access t set to true, Consul verifies the TLS certificate presented by the servers match the hostname `server..`. By default this is false, and Consul does not verify the hostname of the certificate, only that it - is signed by a trusted CA. This setting *must* be enabled to prevent a + is signed by a trusted CA. + + ~> **Security Note:** `verify_server_hostname` *must* be set to true to prevent a compromised client from gaining full read and write access to all cluster data *including all ACL tokens and Connect CA root keys*. 
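To make the security note above concrete, the following is a minimal agent TLS sketch. It is illustrative only and not part of the patch: the option names follow the `tls.internal_rpc` stanza documented in `config-files.mdx`, while the certificate file paths are assumed placeholders.

```hcl
# Illustrative agent configuration (not part of the diff above).
# Certificate paths are placeholders; option names follow tls.internal_rpc.
tls {
  internal_rpc {
    ca_file                = "/consul/tls/consul-agent-ca.pem"        # assumed path
    cert_file              = "/consul/tls/dc1-agent-consul-0.pem"     # assumed path
    key_file               = "/consul/tls/dc1-agent-consul-0-key.pem" # assumed path
    verify_incoming        = true
    verify_outgoing        = true
    # Required so a certificate issued to a client cannot be presented as a
    # server certificate on outgoing RPC connections.
    verify_server_hostname = true
  }
}
```

With `verify_server_hostname = true`, outgoing RPC only accepts certificates issued for `server.<datacenter>.<domain>`, which is what blocks the compromised-client scenario described in the note.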
From a5a200e0e9ccc68e54cb2d6091882e7732cfb98c Mon Sep 17 00:00:00 2001 From: James Hartig Date: Tue, 16 Aug 2022 16:54:01 -0400 Subject: [PATCH 062/104] Use the maximum jitter when calculating the timeout The timeout should include the maximum possible jitter since the server will randomly add to it's timeout a jitter. If the server's timeout is less than the client's timeout then the client will return an i/o deadline reached error. Before: ``` time curl 'http://localhost:8500/v1/catalog/service/service?dc=other-dc&stale=&wait=600s&index=15820644' rpc error making call: i/o deadline reached real 10m11.469s user 0m0.018s sys 0m0.023s ``` After: ``` time curl 'http://localhost:8500/v1/catalog/service/service?dc=other-dc&stale=&wait=600s&index=15820644' [...] real 10m35.835s user 0m0.021s sys 0m0.021s ``` --- .changelog/14233.txt | 3 +++ agent/consul/client_test.go | 7 ++++--- agent/structs/structs.go | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 .changelog/14233.txt diff --git a/.changelog/14233.txt b/.changelog/14233.txt new file mode 100644 index 000000000..5a2c6dee1 --- /dev/null +++ b/.changelog/14233.txt @@ -0,0 +1,3 @@ +```release-note:bugfix +rpc: Adds max jitter to client deadlines to prevent i/o deadline errors on blocking queries +``` diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index 84135ee18..32199d8ab 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -893,8 +893,8 @@ func TestClient_RPC_Timeout(t *testing.T) { } }) - // waiter will sleep for 50ms - require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 50 * time.Millisecond})) + // waiter will sleep for 101ms which is 1ms more than the DefaultQueryTime + require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 101 * time.Millisecond})) // Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms) // so we expect the RPC call to timeout. @@ -903,7 +903,8 @@ func TestClient_RPC_Timeout(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached") - // Blocking requests have a longer timeout (100ms) so this should pass + // Blocking requests have a longer timeout (100ms) so this should pass since we + // add the maximum jitter which should be 16ms out = struct{}{} err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{ QueryOptions: structs.QueryOptions{ diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 22fb47ca9..830168888 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -353,7 +353,7 @@ func (q QueryOptions) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime tim q.MaxQueryTime = defaultQueryTime } // Timeout after maximum jitter has elapsed. - q.MaxQueryTime += lib.RandomStagger(q.MaxQueryTime / JitterFraction) + q.MaxQueryTime += q.MaxQueryTime / JitterFraction return q.MaxQueryTime + rpcHoldTimeout } From bc6ee86d29188dbbdcc9afeb86aa81f531c531a8 Mon Sep 17 00:00:00 2001 From: Michele Degges Date: Wed, 17 Aug 2022 14:48:43 -0700 Subject: [PATCH 063/104] set PRODUCT_VERSION for docker build (#14242) Changes proposed in this PR: In `actions-docker-build` we [pass](https://github.com/hashicorp/actions-docker-build/blob/05c370a26e61b06be46c5095d6e914c9f0ea4f3d/scripts/docker_build#L49) `PRODUCT_VERSION` to the docker build command. Since this was not set, the label did not populate properly which is used in a comparison to determine the `minor-latest` and `latest` docker image tags. 
How I've tested this PR: - build the image up to the point of label creation and pass in `--build-arg PRODUCT_VERSION=1.2.3` - inspect the image for the label with the above command How I expect reviewers to test this PR: - same as above Related [internal-only] post about this: https://hashicorp.atlassian.net/wiki/spaces/RELENG/pages/2416934922/August+17+2022-+Docker+Build+Failures --- Dockerfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index 762471eb5..f47ca4161 100644 --- a/Dockerfile +++ b/Dockerfile @@ -110,13 +110,13 @@ CMD ["agent", "-dev", "-client", "0.0.0.0"] # Remember, this image cannot be built locally. FROM docker.mirror.hashicorp.services/alpine:3.15 as default -ARG VERSION +ARG PRODUCT_VERSION ARG BIN_NAME # PRODUCT_NAME and PRODUCT_VERSION are the name of the software on releases.hashicorp.com # and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3. ENV BIN_NAME=$BIN_NAME -ENV VERSION=$VERSION +ENV VERSION=$PRODUCT_VERSION ARG PRODUCT_REVISION ARG PRODUCT_NAME=$BIN_NAME @@ -128,7 +128,7 @@ LABEL org.opencontainers.image.authors="Consul Team " \ org.opencontainers.image.url="https://www.consul.io/" \ org.opencontainers.image.documentation="https://www.consul.io/docs" \ org.opencontainers.image.source="https://github.com/hashicorp/consul" \ - org.opencontainers.image.version=$VERSION \ + org.opencontainers.image.version=${PRODUCT_VERSION} \ org.opencontainers.image.vendor="HashiCorp" \ org.opencontainers.image.title="consul" \ org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." @@ -217,7 +217,7 @@ LABEL org.opencontainers.image.authors="Consul Team " \ org.opencontainers.image.url="https://www.consul.io/" \ org.opencontainers.image.documentation="https://www.consul.io/docs" \ org.opencontainers.image.source="https://github.com/hashicorp/consul" \ - org.opencontainers.image.version=$VERSION \ + org.opencontainers.image.version=${PRODUCT_VERSION} \ org.opencontainers.image.vendor="HashiCorp" \ org.opencontainers.image.title="consul" \ org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." @@ -284,4 +284,4 @@ USER 100 # By default you'll get an insecure single-node development server that stores # everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself. # Don't use this configuration for production. 
-CMD ["agent", "-dev", "-client", "0.0.0.0"] \ No newline at end of file +CMD ["agent", "-dev", "-client", "0.0.0.0"] From 7f66dfc78008141f293ea8c5e5e61a48f57abaa3 Mon Sep 17 00:00:00 2001 From: cskh Date: Wed, 17 Aug 2022 21:14:04 -0400 Subject: [PATCH 064/104] Fix: upgrade pkg imdario/merg to prevent merge config panic (#14237) * upgrade imdario/merg to prevent merge config panic * test: service definition takes precedence over service-defaults in merged results --- agent/consul/merge_service_config_test.go | 10 ++++++++-- go.mod | 4 ++-- go.sum | 6 ++++-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/agent/consul/merge_service_config_test.go b/agent/consul/merge_service_config_test.go index 5a866dce2..dd9b1cbca 100644 --- a/agent/consul/merge_service_config_test.go +++ b/agent/consul/merge_service_config_test.go @@ -153,6 +153,12 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) { DestinationNamespace: "default", DestinationPartition: "default", DestinationName: "zap", + Config: map[string]interface{}{ + "passive_health_check": map[string]interface{}{ + "Interval": int64(20), + "MaxFailures": int64(4), + }, + }, }, }, }, @@ -171,8 +177,8 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) { DestinationName: "zap", Config: map[string]interface{}{ "passive_health_check": map[string]interface{}{ - "Interval": int64(10), - "MaxFailures": int64(2), + "Interval": int64(20), + "MaxFailures": int64(4), }, "protocol": "grpc", }, diff --git a/go.mod b/go.mod index e2fbafed4..1ade7d6de 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493 - github.com/imdario/mergo v0.3.6 + github.com/imdario/mergo v0.3.13 github.com/kr/text v0.2.0 github.com/miekg/dns v1.1.41 github.com/mitchellh/cli v1.1.0 @@ -183,7 +183,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/resty.v1 v1.12.0 // indirect gopkg.in/yaml.v2 v2.2.8 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect diff --git a/go.sum b/go.sum index ee3e0beda..cf7f2afc3 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,9 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= @@ -969,8 +970,9 @@ 
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 3f88847590b27744aabc371272893b63abc5a05b Mon Sep 17 00:00:00 2001 From: Mariano Asselborn Date: Thu, 18 Aug 2022 14:41:34 -0400 Subject: [PATCH 065/104] Add version label to Docker image (#14204) --- Dockerfile | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index f47ca4161..8e127254f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,10 +22,11 @@ LABEL org.opencontainers.image.authors="Consul Team " \ org.opencontainers.image.url="https://www.consul.io/" \ org.opencontainers.image.documentation="https://www.consul.io/docs" \ org.opencontainers.image.source="https://github.com/hashicorp/consul" \ - org.opencontainers.image.version=$VERSION \ + org.opencontainers.image.version=${VERSION} \ org.opencontainers.image.vendor="HashiCorp" \ org.opencontainers.image.title="consul" \ - org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." + org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \ + version=${VERSION} # This is the location of the releases. ENV HASHICORP_RELEASES=https://releases.hashicorp.com @@ -116,7 +117,7 @@ ARG BIN_NAME # PRODUCT_NAME and PRODUCT_VERSION are the name of the software on releases.hashicorp.com # and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3. ENV BIN_NAME=$BIN_NAME -ENV VERSION=$PRODUCT_VERSION +ENV PRODUCT_VERSION=$PRODUCT_VERSION ARG PRODUCT_REVISION ARG PRODUCT_NAME=$BIN_NAME @@ -131,7 +132,8 @@ LABEL org.opencontainers.image.authors="Consul Team " \ org.opencontainers.image.version=${PRODUCT_VERSION} \ org.opencontainers.image.vendor="HashiCorp" \ org.opencontainers.image.title="consul" \ - org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." + org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \ + version=${PRODUCT_VERSION} # Set up certificates and base tools. # libc6-compat is needed to symlink the shared libraries for ARM builds @@ -220,7 +222,8 @@ LABEL org.opencontainers.image.authors="Consul Team " \ org.opencontainers.image.version=${PRODUCT_VERSION} \ org.opencontainers.image.vendor="HashiCorp" \ org.opencontainers.image.title="consul" \ - org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." 
+ org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \ + version=${PRODUCT_VERSION} # Copy license for Red Hat certification. COPY LICENSE /licenses/mozilla.txt From f92aee09f8e81bb92c4aee906c8cf4448c12ec42 Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Thu, 18 Aug 2022 12:59:03 -0700 Subject: [PATCH 066/104] Add missing changelog for 1.9.17 (#14053) --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b92ca84f3..f3399ba24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -944,6 +944,24 @@ NOTES: * legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive +## 1.9.17 (April 13, 2022) + +SECURITY: + +* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)] +* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)] + +DEPRECATIONS: + +* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-12767](https://github.com/hashicorp/consul/issues/12767)] + +BUG FIXES: + +* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)] +* memberlist: fixes a bug which prevented members from joining a cluster with +large amounts of churn [[GH-253](https://github.com/hashicorp/memberlist/issues/253)] [[GH-12046](https://github.com/hashicorp/consul/issues/12046)] +* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)] + ## 1.9.16 (February 28, 2022) FEATURES: From 18bb45db75beb1f34d700136409fd3e144e3db55 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Thu, 18 Aug 2022 09:26:46 -0700 Subject: [PATCH 067/104] docs: add 1.13 upgrade considerations Adds guidance when upgrading a Consul service mesh deployment to 1.13 and: - using auto-encrypt or auto-config; or - the HTTPS port is not enabled on Consul agents --- .../docs/agent/config/config-files.mdx | 2 +- .../docs/upgrading/instructions/index.mdx | 3 + .../docs/upgrading/upgrade-specific.mdx | 129 ++++++++++++++++-- 3 files changed, 122 insertions(+), 12 deletions(-) diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 5c4f7b909..3cd1428ac 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -2082,7 +2082,7 @@ specially crafted certificate signed by the CA can be used to gain full access t ### Deprecated Options ((#tls_deprecated_options)) The following options were deprecated in Consul 1.12, please use the -[`tls`](#tls) stanza instead. +[`tls`](#tls-1) stanza instead. - `ca_file` See: [`tls.defaults.ca_file`](#tls_defaults_ca_file). 
diff --git a/website/content/docs/upgrading/instructions/index.mdx b/website/content/docs/upgrading/instructions/index.mdx index 466acf335..4ddc86b07 100644 --- a/website/content/docs/upgrading/instructions/index.mdx +++ b/website/content/docs/upgrading/instructions/index.mdx @@ -31,6 +31,9 @@ we recommend reviewing the changelog for versions between the one you are on and one you are upgrading to at each step to familiarize yourself with changes. Select your _currently installed_ release series: +- 1.12.x: work upwards from [1.13 upgrade notes](/docs/upgrading/upgrade-specific#consul-1-13-x) +- 1.11.x: work upwards from [1.12 upgrade notes](/docs/upgrading/upgrade-specific#consul-1-12-0) +- 1.10.x: work upwards from [1.11 upgrade notes](/docs/upgrading/upgrade-specific#consul-1-11-0) - [1.9.x](/docs/upgrading/instructions/upgrade-to-1-10-x) - [1.8.x](/docs/upgrading/instructions/upgrade-to-1-10-x) - [1.7.x](/docs/upgrading/instructions/upgrade-to-1-8-x) diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index 75884adb2..2732ffe4f 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -18,8 +18,15 @@ upgrade flow. ### Service Mesh Compatibility -Existing Consul deployments using service mesh (i.e., containing any registered -Connect proxies) should upgrade to **at least Consul 1.13.1**. +Before upgrading existing Consul deployments using service mesh to Consul 1.13.x, +review the following guidances relevant to your deployment: +- [All service mesh deployments](#all-service-mesh-deployments) +- [Service mesh deployments using auto-encrypt or auto-config](#service-mesh-deployments-using-auto-encrypt-or-auto-config) +- [Service mesh deployments without the HTTPS port enabled on Consul agents](#service-mesh-deployments-without-the-https-port-enabled-on-consul-agents) + +#### All service mesh deployments + +Upgrade to **Consul version 1.13.1 or later**. Consul 1.13.0 contains a bug that prevents Consul server agents from restoring saved state on startup if the state @@ -29,17 +36,117 @@ saved state on startup if the state This bug is fixed in Consul versions 1.13.1 and newer. -### gRPC TLS +#### Service mesh deployments using auto-encrypt or auto-config -In prior Consul versions if HTTPS was enabled for the client API and exposed -via `ports { https = NUMBER }` then the same TLS material was used to encrypt -the gRPC port used for xDS. Now this is decoupled and activating TLS on the -gRPC endpoint is controlled solely with the gRPC section of the new -[`tls` stanza](/docs/agent/config/config-files#tls-configuration-reference). +**Do not upgrade to Consul 1.13 yet** if using +[auto-encrypt](/docs/agent/config/config-files#auto_encrypt) or +[auto-config](/docs/agent/config/config-files#auto_config). -If you have not yet switched to the new `tls` stanza and were NOT using HTTPS -for the API then updating to Consul 1.13 will activate TLS for gRPC since the -deprecated TLS settings are used as defaults. +In Consul 1.13, auto-encrypt and auto-config both cause Consul +to require TLS for gRPC communication with Envoy proxies. +In environments where Envoy proxies are not already configured +to use TLS for gRPC, upgrading Consul 1.13 will cause +Envoy proxies to disconnect from the control plane (Consul agents). 
+ +The underlying cause is the same as discussed in +[deployments without the HTTPS port enabled on Consul agents](#service-mesh-deployments-without-the-https-port-enabled-on-consul-agents). +However, when using auto-encrypt or auto-config, +the problem **cannot** currently be avoided by +[modifying the agent's TLS configuration](#modify-the-consul-agent-s-tls-configuration) +because auto-encrypt and auto-config automatically set +interface-generic TLS configuration in a manner similar to +[`tls.defaults`](/docs/agent/config/config-files#tls_defaults). +We are working to address this problem in an upcoming 1.13 patch release. + +#### Service mesh deployments without the HTTPS port enabled on Consul agents ((#grpc-tls)) + +If the HTTPS port is not enabled +([`ports { https = POSITIVE_INTEGER }`](/docs/agent/config/config-files#https_port)) +on a pre-1.13 Consul agent, +**[modify the agent's TLS configuration before upgrading](#modify-the-consul-agent-s-tls-configuration)** +to avoid Envoy proxies disconnecting from the control plane (Consul agents). +Envoy proxies include service mesh sidecars and gateways. + +##### Changes to gRPC and HTTP interface configuration + +If a Consul agent's HTTP API is exposed externally, +enabling HTTPS (TLS encryption for HTTP) is important. + +The gRPC interface is used for xDS communication between Consul and +Envoy proxies when using Consul service mesh. +A Consul agent's gRPC traffic is often loopback-only, +which TLS encryption is not important for. + +Prior to Consul 1.13, if [`ports { https = POSITIVE_INTEGER }`](/docs/agent/config/config-files#https_port) +was configured, TLS was enabled for both HTTP *and* gRPC. +This was inconvenient for deployments that +needed TLS for HTTP, but not for gRPC. +Enabling HTTPS also required launching Envoy proxies +with the necessary TLS material for xDS communication +with its Consul agent via TLS over gRPC. + +Consul 1.13 addresses this inconvenience by fully decoupling the TLS configuration for HTTP and gRPC interfaces. +TLS for gRPC is no longer enabled by setting +[`ports { https = POSITIVE_INTEGER }`](/docs/agent/config/config-files#https_port). +TLS configuration for gRPC is now determined exclusively by: + +1. [`tls.grpc`](/docs/agent/config/config-files#tls_grpc), which overrides +1. [`tls.defaults`](/docs/agent/config/config-files#tls_defaults), which overrides +1. [Deprecated TLS options](/docs/agent/config/config-files#tls_deprecated_options) such as + [`ca_file`](/docs/agent/config/config-files#ca_file-4), + [`cert_file`](/docs/agent/config/config-files#cert_file-4), and + [`key_file`](/docs/agent/config/config-files#key_file-4). + +This decoupling has a side effect that requires a +[TLS configuration change](#modify-the-consul-agent-s-tls-configuration) +for pre-1.13 agents without the HTTPS port enabled. +Without a TLS configuration change, +Consul 1.13 agents may now expect gRPC *with* TLS, +causing communication to fail with Envoy proxies +that continue to use gRPC *without* TLS. + +##### Modify the Consul agent's TLS configuration + +If [`tls.grpc`](/docs/agent/config/config-files#tls_grpc), +[`tls.defaults`](/docs/agent/config/config-files#tls_defaults), +or the [deprecated TLS options](/docs/agent/config/config-files#tls_deprecated_options) +define TLS material in their +`ca_file`, `ca_path`, `cert_file`, or `key_file` fields, +TLS for gRPC will be enabled in Consul 1.13, even if +[`ports { https = POSITIVE_INTEGER }`](/docs/agent/config/config-files#https_port) +is not set. 
+ +This will cause Envoy proxies to disconnect from the control plane +after upgrading to Consul 1.13 if associated pre-1.13 Consul agents +have **not** set +[`ports { https = POSITIVE_INTEGER }`](/docs/agent/config/config-files#https_port). +To avoid this problem, make the following agent configuration changes: + +1. Remove TLS material from the Consul agents' + interface-generic TLS configuration options: + [`tls.defaults`](/docs/agent/config/config-files#tls_grpc) and + [deprecated TLS options](/docs/agent/config/config-files#tls_deprecated_options) +1. Reapply TLS material to the non-gRPC interfaces that need it with the + interface-specific TLS configuration stanzas + [introduced in Consul 1.12](/docs/upgrading/upgrade-specific#tls-configuration): + [`tls.https`](/docs/agent/config/config-files#tls_https) and + [`tls.internal_rpc`](/docs/agent/config/config-files#tls_internal_rpc). + + If upgrading directly from pre-1.12 Consul, + the above configuration change cannot be made before upgrading. + Therefore, consider upgrading agents to Consul 1.12 before upgrading to 1.13. + +If pre-1.13 Consul agents have set +[`ports { https = POSITIVE_INTEGER }`](/docs/agent/config/config-files#https_port), +this configuration change is not required to upgrade. +That setting means the pre-1.13 Consul agent requires TLS for gRPC *already*, +and will continue to do so after upgrading to 1.13. +If your pre-1.13 service mesh is working, you have already +configured your Envoy proxies to use TLS for gRPC when bootstrapping Envoy +via [`consul connect envoy`](/commands/connect/envoy), +such as with flags or environment variables like +[`-ca-file`](/commands/connect/envoy#ca-file) and +[`CONSUL_CACERT`](/commands#consul_cacert). ### 1.9 Telemetry Compatibility From f4bfb6d4994075b91dc95de86fe02e0c633c7e4a Mon Sep 17 00:00:00 2001 From: Chris Thain <32781396+cthain@users.noreply.github.com> Date: Thu, 18 Aug 2022 16:06:20 -0700 Subject: [PATCH 068/104] Skip Lambda integration tests for fork PRs (#14257) --- .circleci/config.yml | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 60d6c3413..105666c66 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -109,15 +109,18 @@ commands: type: env_var_name default: ROLE_ARN steps: + # Only run the assume-role command for the main repo. The AWS credentials aren't available for forks. 
- run: | - export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}" - export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}" - export ROLE_ARN="${<< parameters.role-arn >>}" - # assume role has duration of 15 min (the minimum allowed) - CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')" - echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV - echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV - echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV + if [[ "${CIRCLE_BRANCH%%/*}/" != "pull/" ]]; then + export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}" + export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}" + export ROLE_ARN="${<< parameters.role-arn >>}" + # assume role has duration of 15 min (the minimum allowed) + CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')" + echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV + echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV + echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV + fi run-go-test-full: parameters: From a87d8f48beb083312bfd8f660a42923fb729076b Mon Sep 17 00:00:00 2001 From: cskh Date: Fri, 19 Aug 2022 14:11:21 -0400 Subject: [PATCH 069/104] fix: missing MaxInboundConnections field in service-defaults config entry (#14072) * fix: missing max_inbound_connections field in merge config --- agent/consul/merge_service_config.go | 7 ++ agent/consul/merge_service_config_test.go | 47 +++++++++++ agent/structs/config_entry_test.go | 79 +++++++++++++++++++ api/config_entry.go | 31 ++++---- api/config_entry_test.go | 2 + .../config-entries/service-defaults.mdx | 6 ++ 6 files changed, 157 insertions(+), 15 deletions(-) diff --git a/agent/consul/merge_service_config.go b/agent/consul/merge_service_config.go index 027a2d3f5..91fe229ea 100644 --- a/agent/consul/merge_service_config.go +++ b/agent/consul/merge_service_config.go @@ -159,6 +159,13 @@ func computeResolvedServiceConfig( thisReply.Destination = *serviceConf.Destination } + if serviceConf.MaxInboundConnections > 0 { + if thisReply.ProxyConfig == nil { + thisReply.ProxyConfig = map[string]interface{}{} + } + thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections + } + thisReply.Meta = serviceConf.Meta } diff --git a/agent/consul/merge_service_config_test.go b/agent/consul/merge_service_config_test.go index dd9b1cbca..b34c85143 100644 --- a/agent/consul/merge_service_config_test.go +++ b/agent/consul/merge_service_config_test.go @@ -3,12 +3,59 @@ package consul import ( "testing" + "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" "github.com/mitchellh/copystructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func Test_ComputeResolvedServiceConfig(t *testing.T) { + type args struct { + scReq *structs.ServiceConfigRequest + upstreamIDs []structs.ServiceID + entries *configentry.ResolvedServiceConfigSet + } + + sid := structs.ServiceID{ + ID: "sid", + } + tests := []struct { + name string + args args + want *structs.ServiceConfigResponse + }{ + { + name: "proxy with maxinboundsconnections", + args: args{ + scReq: &structs.ServiceConfigRequest{ + 
Name: "sid", + }, + entries: &configentry.ResolvedServiceConfigSet{ + ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{ + sid: { + MaxInboundConnections: 20, + }, + }, + }, + }, + want: &structs.ServiceConfigResponse{ + ProxyConfig: map[string]interface{}{ + "max_inbound_connections": 20, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := computeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs, + false, tt.args.entries, nil) + require.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + func Test_MergeServiceConfig_TransparentProxy(t *testing.T) { type args struct { defaults *structs.ServiceConfigResponse diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index c3f5c7a98..e462f6aa7 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -216,6 +216,85 @@ func testConfigEntries_ListRelatedServices_AndACLs(t *testing.T, cases []configE } } +func TestDecodeConfigEntry_ServiceDefaults(t *testing.T) { + + for _, tc := range []struct { + name string + camel string + snake string + expect ConfigEntry + expectErr string + }{ + { + name: "service-defaults-with-MaxInboundConnections", + snake: ` + kind = "service-defaults" + name = "external" + protocol = "tcp" + destination { + addresses = [ + "api.google.com", + "web.google.com" + ] + port = 8080 + } + max_inbound_connections = 14 + `, + camel: ` + Kind = "service-defaults" + Name = "external" + Protocol = "tcp" + Destination { + Addresses = [ + "api.google.com", + "web.google.com" + ] + Port = 8080 + } + MaxInboundConnections = 14 + `, + expect: &ServiceConfigEntry{ + Kind: "service-defaults", + Name: "external", + Protocol: "tcp", + Destination: &DestinationConfig{ + Addresses: []string{ + "api.google.com", + "web.google.com", + }, + Port: 8080, + }, + MaxInboundConnections: 14, + }, + }, + } { + tc := tc + + testbody := func(t *testing.T, body string) { + var raw map[string]interface{} + err := hcl.Decode(&raw, body) + require.NoError(t, err) + + got, err := DecodeConfigEntry(raw) + if tc.expectErr != "" { + require.Nil(t, got) + require.Error(t, err) + requireContainsLower(t, err.Error(), tc.expectErr) + } else { + require.NoError(t, err) + require.Equal(t, tc.expect, got) + } + } + + t.Run(tc.name+" (snake case)", func(t *testing.T) { + testbody(t, tc.snake) + }) + t.Run(tc.name+" (camel case)", func(t *testing.T) { + testbody(t, tc.camel) + }) + } +} + // TestDecodeConfigEntry is the 'structs' mirror image of // command/config/write/config_write_test.go:TestParseConfigEntry func TestDecodeConfigEntry(t *testing.T) { diff --git a/api/config_entry.go b/api/config_entry.go index ee55b55ad..da685b786 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -218,21 +218,22 @@ type UpstreamLimits struct { } type ServiceConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Protocol string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - ExternalSNI string `json:",omitempty" alias:"external_sni"` - UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` - Destination *DestinationConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex 
uint64 + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Protocol string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + ExternalSNI string `json:",omitempty" alias:"external_sni"` + UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` + Destination *DestinationConfig `json:",omitempty"` + MaxInboundConnections int `json:",omitempty" alias:"max_inbound_connections"` + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 } func (s *ServiceConfigEntry) GetKind() string { return s.Kind } diff --git a/api/config_entry_test.go b/api/config_entry_test.go index 4249e7547..63aba11b8 100644 --- a/api/config_entry_test.go +++ b/api/config_entry_test.go @@ -104,6 +104,7 @@ func TestAPI_ConfigEntries(t *testing.T) { "foo": "bar", "gir": "zim", }, + MaxInboundConnections: 5, } dest := &DestinationConfig{ @@ -144,6 +145,7 @@ func TestAPI_ConfigEntries(t *testing.T) { require.Equal(t, service.Protocol, readService.Protocol) require.Equal(t, service.Meta, readService.Meta) require.Equal(t, service.Meta, readService.GetMeta()) + require.Equal(t, service.MaxInboundConnections, readService.MaxInboundConnections) // update it service.Protocol = "tcp" diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index 2dad3b526..54aabfe8e 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -687,6 +687,12 @@ represents a location outside the Consul cluster. They can be dialed directly wh }, ] }, + { + name: 'MaxInboundConnections', + description: 'The maximum number of concurrent inbound connections to each service instance.', + type: 'int: 0', + yaml: true, + }, { name: 'MeshGateway', type: 'MeshGatewayConfig: ', From 9d1086b11570309ea8d7e6f46706d7f76d982d63 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 19 Aug 2022 11:11:41 -0700 Subject: [PATCH 070/104] docs: add 1.13 upgrade considerations to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3399ba24..94217c74a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,9 @@ connect: Terminating gateways with a wildcard service entry should no longer pic BREAKING CHANGES: * config-entry: Exporting a specific service name across all namespace is invalid. +* connect: contains an upgrade compatibility issue when restoring snapshots containing service mesh proxy registrations from pre-1.13 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)]. Fixed in 1.13.1 [[GH-14149](https://github.com/hashicorp/consul/issues/14149)]. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#all-service-mesh-deployments) for more information. +* connect: if using auto-encrypt or auto-config, TLS is required for gRPC communication between Envoy and Consul as of 1.13.0; this TLS for gRPC requirement will be removed in a future 1.13 patch release. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more information. 
+* connect: if a pre-1.13 Consul agent's HTTPS port was not enabled, upgrading to 1.13 may turn on TLS for gRPC communication for Envoy and Consul depending on the agent's TLS configuration. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#grpc-tls) for more information. * connect: Removes support for Envoy 1.19 [[GH-13807](https://github.com/hashicorp/consul/issues/13807)] * telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Before upgrading you should remove this flag from your config if the flag is being used. [[GH-13532](https://github.com/hashicorp/consul/issues/13532)] From f47a1c333a9fa7fbcd5ec361a389b2727aad18cd Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 22 Aug 2022 12:33:42 -0400 Subject: [PATCH 071/104] Add example code for cross-namespace certificateRefs --- .../api-gateway/configuration/gateway.mdx | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index a3f8594c6..652aa009f 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -183,3 +183,49 @@ tls: ``` +#### Example cross-namespace certificateRef + +The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace`. This `Gateway` has a `certificateRef` in namespace `secret-namespace`. The reference is allowed because the `ReferenceGrant`, named `reference-grant` in namespace `secret-namespace`, allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace`. + + + + ```yaml + apiVersion: gateway.networking.k8s.io/v1beta1 + kind: Gateway + metadata: + name: example-gateway + namespace: gateway-namespace + spec: + gatewayClassName: consul-api-gateway + listeners: + - protocol: HTTPS + port: 443 + name: https + allowedRoutes: + namespaces: + from: Same + tls: + certificateRefs: + - name: cert + namespace: secret-namespace + group: "" + kind: Secret + --- + + apiVersion: gateway.networking.k8s.io/v1alpha2 + kind: ReferenceGrant + metadata: + name: reference-grant + namespace: secret-namespace + spec: + from: + - group: gateway.networking.k8s.io + kind: Gateway + namespace: gateway-namespace + to: + - group: "" + kind: Secret + name: cert + ``` + + From e9ec4f1c2556d3be419abbb474d0ca0f9636af7b Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 22 Aug 2022 12:34:16 -0400 Subject: [PATCH 072/104] Correct structure of existing tls.certificateRefs example --- website/content/docs/api-gateway/configuration/gateway.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index 652aa009f..7dbbb34fd 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -174,7 +174,7 @@ In the following example, `tls` settings are configured to use a secret named `c tls: certificateRefs: - name: consul-server-cert + - name: consul-server-cert group: "" kind: Secret mode: Terminate From e30d6bfc40e6b3e202743b5c232656965d95295c Mon Sep 17 00:00:00 2001 From: cskh Date: Mon, 22 Aug 2022 13:51:04 -0400 Subject: [PATCH 073/104] Fix: add missing ent meta for test (#14289) --- agent/consul/merge_service_config_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff 
--git a/agent/consul/merge_service_config_test.go b/agent/consul/merge_service_config_test.go index b34c85143..a497579a7 100644 --- a/agent/consul/merge_service_config_test.go +++ b/agent/consul/merge_service_config_test.go @@ -18,7 +18,8 @@ func Test_ComputeResolvedServiceConfig(t *testing.T) { } sid := structs.ServiceID{ - ID: "sid", + ID: "sid", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), } tests := []struct { name string From 96744b581c20958de2578780563171ddca7e1d33 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Mon, 22 Aug 2022 11:04:51 -0700 Subject: [PATCH 074/104] Update requirements.mdx (#14286) * Update requirements.mdx --- website/content/docs/ecs/requirements.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/website/content/docs/ecs/requirements.mdx b/website/content/docs/ecs/requirements.mdx index e7c0a8ea8..0fb0ab451 100644 --- a/website/content/docs/ecs/requirements.mdx +++ b/website/content/docs/ecs/requirements.mdx @@ -10,6 +10,7 @@ description: >- The following requirements must be met in order to install Consul on ECS: * **Launch Type:** Fargate and EC2 launch types are supported. +* **Network Mode:** Only `awsvpc` mode is supported. * **Subnets:** ECS Tasks can run in private or public subnets. Tasks must have [network access](https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/) to Amazon ECR or other public container registries to pull images. * **Consul Servers:** You can use your own Consul servers running on virtual machines or use [HashiCorp Cloud Platform Consul](https://www.hashicorp.com/cloud-platform) to host the servers for you. For development purposes or testing, you may use the `dev-server` [Terraform module](https://github.com/hashicorp/terraform-aws-consul-ecs/tree/main) that runs the Consul server as an ECS task. The `dev-server` does not support persistent storage. * **ACL Controller:** If you are running a secure Consul installation with ACLs enabled, configure the ACL controller. From 6f57024d0a6b8d74a3985be7c3f068346c1836f2 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 22 Aug 2022 14:31:19 -0400 Subject: [PATCH 075/104] Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- website/content/docs/api-gateway/configuration/gateway.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index 7dbbb34fd..89da4ba22 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -185,9 +185,9 @@ tls: #### Example cross-namespace certificateRef -The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace`. This `Gateway` has a `certificateRef` in namespace `secret-namespace`. The reference is allowed because the `ReferenceGrant`, named `reference-grant` in namespace `secret-namespace`, allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace`. +The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). 
The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 23-26), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 30-34). - + ```yaml apiVersion: gateway.networking.k8s.io/v1beta1 From 9dd1b95aa78a39a6936e7cc6fc389bef06a7a555 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 22 Aug 2022 14:40:43 -0400 Subject: [PATCH 076/104] Update website/content/docs/api-gateway/configuration/gateway.mdx --- website/content/docs/api-gateway/configuration/gateway.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index 89da4ba22..be9906292 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -187,7 +187,7 @@ tls: The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 23-26), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 30-34). - + ```yaml apiVersion: gateway.networking.k8s.io/v1beta1 From 9362cc652598e18b7255fc0365baaf127d5cf640 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 22 Aug 2022 15:14:30 -0400 Subject: [PATCH 077/104] Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- website/content/docs/api-gateway/configuration/gateway.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index be9906292..43fc27018 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -185,9 +185,9 @@ tls: #### Example cross-namespace certificateRef -The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 23-26), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 30-34). +The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 23-26), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 30-35). 
- + ```yaml apiVersion: gateway.networking.k8s.io/v1beta1 From 34f37291409ff8e67568013631cdcdc2c8c6bd17 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Mon, 22 Aug 2022 16:22:43 -0400 Subject: [PATCH 078/104] Update website/content/docs/api-gateway/configuration/gateway.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- website/content/docs/api-gateway/configuration/gateway.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index 43fc27018..240b19721 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -185,9 +185,9 @@ tls: #### Example cross-namespace certificateRef -The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 23-26), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 30-35). +The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 24-27), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 31-35). - + ```yaml apiVersion: gateway.networking.k8s.io/v1beta1 From 9929f9a96d6abcfa4e31edb0887b001daf01f5a8 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Mon, 22 Aug 2022 21:37:03 -0700 Subject: [PATCH 079/104] Reshuffle Docs nav IA --- .../service-discovery.mdx} | 0 .../service-mesh.mdx} | 0 website/content/docs/intro/usecases/index.mdx | 9 - website/data/docs-nav-data.json | 777 +++++++++--------- 4 files changed, 389 insertions(+), 397 deletions(-) rename website/content/docs/{intro/usecases/what-is-service-discovery.mdx => concepts/service-discovery.mdx} (100%) rename website/content/docs/{intro/usecases/what-is-a-service-mesh.mdx => concepts/service-mesh.mdx} (100%) delete mode 100644 website/content/docs/intro/usecases/index.mdx diff --git a/website/content/docs/intro/usecases/what-is-service-discovery.mdx b/website/content/docs/concepts/service-discovery.mdx similarity index 100% rename from website/content/docs/intro/usecases/what-is-service-discovery.mdx rename to website/content/docs/concepts/service-discovery.mdx diff --git a/website/content/docs/intro/usecases/what-is-a-service-mesh.mdx b/website/content/docs/concepts/service-mesh.mdx similarity index 100% rename from website/content/docs/intro/usecases/what-is-a-service-mesh.mdx rename to website/content/docs/concepts/service-mesh.mdx diff --git a/website/content/docs/intro/usecases/index.mdx b/website/content/docs/intro/usecases/index.mdx deleted file mode 100644 index 2f2b4b324..000000000 --- a/website/content/docs/intro/usecases/index.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -layout: docs -page_title: usecases -description: >- - Consul Service Mesh can be deployed on AWS ECS (Elastic Container Service). - This section documents the official installation of Consul on ECS. 
---- - -lals diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 7f64ed3b4..010d36922 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -1,28 +1,18 @@ [ { "title": "What is Consul?", + "path": "intro" + }, + { + "title": "Core Concepts", "routes": [ { - "title": "Overview", - "path": "intro" + "title": "Service Discovery", + "path": "concepts/service-discovery" }, { - "title": "Use Cases", - "routes": [ - { - "title": "Overview", - "path": "intro/usecases", - "hidden": true - }, - { - "title": "Service Discovery", - "path": "intro/usecases/what-is-service-discovery" - }, - { - "title": "Service Mesh", - "path": "intro/usecases/what-is-a-service-mesh" - } - ] + "title": "Service Mesh", + "path": "concepts/service-mesh" } ] }, @@ -69,12 +59,40 @@ ] }, { - "title": "API", - "href": "/api-docs" + "title": "Architecture", + "routes": [ + { + "title": "Overview", + "path": "architecture" + }, + { + "title": "Improving Consul Resilience", + "path": "architecture/improving-consul-resilience" + }, + { + "title": "Anti-Entropy", + "path": "architecture/anti-entropy" + }, + { + "title": "Consensus Protocol", + "path": "architecture/consensus" + }, + { + "title": "Gossip Protocol", + "path": "architecture/gossip" + }, + { + "title": "Jepsen Testing", + "path": "architecture/jepsen" + }, + { + "title": "Network Coordinates", + "path": "architecture/coordinates" + } + ] }, { - "title": "Commands (CLI)", - "href": "/commands" + "divider": true }, { "title": "Service Discovery", @@ -355,59 +373,181 @@ ] }, { - "title": "Consul API Gateway", + "title": "Dynamic App Configuration", + "routes": [ + { + "title": "Consul KV", + "path": "dynamic-app-config/kv" + }, + { + "title": "Sessions", + "path": "dynamic-app-config/sessions" + }, + { + "title": "Watches", + "path": "dynamic-app-config/watches" + } + ] + }, + { + "title": "Security", "routes": [ { "title": "Overview", - "path": "api-gateway" + "path": "security" }, { - "title": "Installation", - "path": "api-gateway/install" + "title": "Access Control (ACLs)", + "routes": [ + { + "title": "ACL System Overview", + "path": "security/acl" + }, + { + "title": "Tokens", + "path": "security/acl/acl-tokens" + }, + { + "title": "Policies", + "path": "security/acl/acl-policies" + }, + { + "title": "Roles", + "path": "security/acl/acl-roles" + }, + { + "title": "Rules Reference", + "path": "security/acl/acl-rules" + }, + { + "title": "Legacy Mode", + "path": "security/acl/acl-legacy" + }, + { + "title": "Token Migration", + "path": "security/acl/acl-migrate-tokens" + }, + { + "title": "ACLs in Federated Datacenters", + "path": "security/acl/acl-federated-datacenters" + }, + { + "title": "Auth Methods", + "routes": [ + { + "title": "Overview", + "path": "security/acl/auth-methods" + }, + { + "title": "Kubernetes", + "path": "security/acl/auth-methods/kubernetes" + }, + { + "title": "JWT", + "path": "security/acl/auth-methods/jwt" + }, + { + "title": "OIDC", + "path": "security/acl/auth-methods/oidc" + }, + { + "title": "AWS IAM", + "path": "security/acl/auth-methods/aws-iam" + } + ] + } + ] }, { - "title": "Technical Specifications", - "path": "api-gateway/tech-specs" + "title": "Encryption", + "path": "security/encryption" }, { - "title": "Upgrades", - "path": "api-gateway/upgrades" - }, + "title": "Security Models", + "routes": [ + { + "title": "Overview", + "path": "security/security-models" + }, + { + "title": "Core", + "path": "security/security-models/core" + }, + { + "title": "Network 
Infrastructure Automation", + "path": "security/security-models/nia" + } + ] + } + ] + }, + { + "title": "Agent", + "routes": [ { - "title": "Usage", - "path": "api-gateway/usage" + "title": "Overview", + "path": "agent" }, { "title": "Configuration", "routes": [ { - "title": "Overview", - "path": "api-gateway/configuration" + "title": "General", + "path": "agent/config" }, { - "title": "Gateway", - "path": "api-gateway/configuration/gateway" + "title": "CLI Reference", + "path": "agent/config/cli-flags" }, { - "title": "GatewayClass", - "path": "api-gateway/configuration/gatewayclass" - }, - { - "title": "GatewayClassConfig", - "path": "api-gateway/configuration/gatewayclassconfig" - }, - { - "title": "Routes", - "path": "api-gateway/configuration/routes" - }, - { - "title": "MeshService", - "path": "api-gateway/configuration/meshservice" + "title": "Configuration Reference", + "path": "agent/config/config-files" } ] + }, + { + "title": "Configuration Entries", + "path": "agent/config-entries" + }, + { + "title": "Telemetry", + "path": "agent/telemetry" + }, + { + "title": "Sentinel", + "path": "agent/sentinel" + }, + { + "title": "RPC", + "path": "agent/rpc", + "hidden": true } ] }, + { + "title": "Integrations", + "routes": [ + { + "title": "Consul Integration Program", + "path": "integrate/partnerships" + }, + { + "title": "NIA Integration Program", + "path": "integrate/nia-integration" + }, + { + "title": "Vault Integration", + "href": "/docs/connect/ca/vault" + }, + { + "title": "Proxy Integration", + "href": "/docs/connect/proxies/integrate" + } + ] + }, + { + "divider": true + }, { "title": "Kubernetes", "routes": [ @@ -777,6 +917,140 @@ } ] }, + { + "divider": true + }, + { + "title": "HCP Consul", + "href": "https://cloud.hashicorp.com/docs/consul" + }, + { + "title": "Consul Enterprise", + "routes": [ + { + "title": "Overview", + "path": "enterprise" + }, + { + "title": "Admin Partitions", + "path": "enterprise/admin-partitions" + }, + { + "title": "Audit Logging", + "path": "enterprise/audit-logging" + }, + { + "title": "Automated Backups", + "path": "enterprise/backups" + }, + { + "title": "Automated Upgrades", + "path": "enterprise/upgrades" + }, + { + "title": "Enhanced Read Scalability", + "path": "enterprise/read-scale" + }, + { + "title": "Single sign-on - OIDC", + "href": "/docs/security/acl/auth-methods/oidc" + }, + { + "title": "Redundancy Zones", + "path": "enterprise/redundancy" + }, + { + "title": "Advanced Federation", + "path": "enterprise/federation" + }, + { + "title": "Network Segments", + "path": "enterprise/network-segments" + }, + { + "title": "Namespaces", + "path": "enterprise/namespaces" + }, + { + "title": "NIA with TFE", + "href": "/docs/nia/enterprise" + }, + { + "title": "Sentinel", + "path": "enterprise/sentinel" + }, + { + "title": "License", + "routes": [ + { + "title": "Overview", + "path": "enterprise/license/overview" + }, + { + "title": "FAQ", + "path": "enterprise/license/faq" + } + ] + } + ] + }, + { + "divider": true + }, + { + "title": "Consul API Gateway", + "routes": [ + { + "title": "Overview", + "path": "api-gateway" + }, + { + "title": "Installation", + "path": "api-gateway/install" + }, + { + "title": "Technical Specifications", + "path": "api-gateway/tech-specs" + }, + { + "title": "Upgrades", + "path": "api-gateway/upgrades" + }, + { + "title": "Usage", + "path": "api-gateway/usage" + }, + { + "title": "Configuration", + "routes": [ + { + "title": "Overview", + "path": "api-gateway/configuration" + }, + { + "title": "Gateway", + 
"path": "api-gateway/configuration/gateway" + }, + { + "title": "GatewayClass", + "path": "api-gateway/configuration/gatewayclass" + }, + { + "title": "GatewayClassConfig", + "path": "api-gateway/configuration/gatewayclassconfig" + }, + { + "title": "Routes", + "path": "api-gateway/configuration/routes" + }, + { + "title": "MeshService", + "path": "api-gateway/configuration/meshservice" + } + ] + } + ] + }, { "title": "Network Infrastructure Automation", "routes": [ @@ -900,343 +1174,7 @@ ] }, { - "title": "Dynamic App Configuration", - "routes": [ - { - "title": "Consul KV", - "path": "dynamic-app-config/kv" - }, - { - "title": "Sessions", - "path": "dynamic-app-config/sessions" - }, - { - "title": "Watches", - "path": "dynamic-app-config/watches" - } - ] - }, - { - "title": "Agent", - "routes": [ - { - "title": "Overview", - "path": "agent" - }, - { - "title": "Configuration", - "routes": [ - { - "title": "General", - "path": "agent/config" - }, - { - "title": "CLI Reference", - "path": "agent/config/cli-flags" - }, - { - "title": "Configuration Reference", - "path": "agent/config/config-files" - } - ] - }, - { - "title": "Configuration Entries", - "path": "agent/config-entries" - }, - { - "title": "Telemetry", - "path": "agent/telemetry" - }, - { - "title": "Sentinel", - "path": "agent/sentinel" - }, - { - "title": "RPC", - "path": "agent/rpc", - "hidden": true - } - ] - }, - { - "title": "Security", - "routes": [ - { - "title": "Overview", - "path": "security" - }, - { - "title": "Access Control (ACLs)", - "routes": [ - { - "title": "ACL System Overview", - "path": "security/acl" - }, - { - "title": "Tokens", - "path": "security/acl/acl-tokens" - }, - { - "title": "Policies", - "path": "security/acl/acl-policies" - }, - { - "title": "Roles", - "path": "security/acl/acl-roles" - }, - { - "title": "Rules Reference", - "path": "security/acl/acl-rules" - }, - { - "title": "Legacy Mode", - "path": "security/acl/acl-legacy" - }, - { - "title": "Token Migration", - "path": "security/acl/acl-migrate-tokens" - }, - { - "title": "ACLs in Federated Datacenters", - "path": "security/acl/acl-federated-datacenters" - }, - { - "title": "Auth Methods", - "routes": [ - { - "title": "Overview", - "path": "security/acl/auth-methods" - }, - { - "title": "Kubernetes", - "path": "security/acl/auth-methods/kubernetes" - }, - { - "title": "JWT", - "path": "security/acl/auth-methods/jwt" - }, - { - "title": "OIDC", - "path": "security/acl/auth-methods/oidc" - }, - { - "title": "AWS IAM", - "path": "security/acl/auth-methods/aws-iam" - } - ] - } - ] - }, - { - "title": "Encryption", - "path": "security/encryption" - }, - { - "title": "Security Models", - "routes": [ - { - "title": "Overview", - "path": "security/security-models" - }, - { - "title": "Core", - "path": "security/security-models/core" - }, - { - "title": "Network Infrastructure Automation", - "path": "security/security-models/nia" - } - ] - } - ] - }, - { - "title": "Consul Enterprise", - "routes": [ - { - "title": "Overview", - "path": "enterprise" - }, - { - "title": "Admin Partitions", - "path": "enterprise/admin-partitions" - }, - { - "title": "Audit Logging", - "path": "enterprise/audit-logging" - }, - { - "title": "Automated Backups", - "path": "enterprise/backups" - }, - { - "title": "Automated Upgrades", - "path": "enterprise/upgrades" - }, - { - "title": "Enhanced Read Scalability", - "path": "enterprise/read-scale" - }, - { - "title": "Single sign-on - OIDC", - "href": "/docs/security/acl/auth-methods/oidc" - }, - { - "title": "Redundancy 
Zones", - "path": "enterprise/redundancy" - }, - { - "title": "Advanced Federation", - "path": "enterprise/federation" - }, - { - "title": "Network Segments", - "path": "enterprise/network-segments" - }, - { - "title": "Namespaces", - "path": "enterprise/namespaces" - }, - { - "title": "NIA with TFE", - "href": "/docs/nia/enterprise" - }, - { - "title": "Sentinel", - "path": "enterprise/sentinel" - }, - { - "title": "License", - "routes": [ - { - "title": "Overview", - "path": "enterprise/license/overview" - }, - { - "title": "FAQ", - "path": "enterprise/license/faq" - } - ] - } - ] - }, - { - "title": "Architecture", - "routes": [ - { - "title": "Overview", - "path": "architecture" - }, - { - "title": "Improving Consul Resilience", - "path": "architecture/improving-consul-resilience" - }, - { - "title": "Anti-Entropy", - "path": "architecture/anti-entropy" - }, - { - "title": "Consensus Protocol", - "path": "architecture/consensus" - }, - { - "title": "Gossip Protocol", - "path": "architecture/gossip" - }, - { - "title": "Jepsen Testing", - "path": "architecture/jepsen" - }, - { - "title": "Network Coordinates", - "path": "architecture/coordinates" - } - ] - }, - { - "title": "Integrations", - "routes": [ - { - "title": "Consul Integration Program", - "path": "integrate/partnerships" - }, - { - "title": "NIA Integration Program", - "path": "integrate/nia-integration" - }, - { - "title": "Vault Integration", - "href": "/docs/connect/ca/vault" - }, - { - "title": "Proxy Integration", - "href": "/docs/connect/proxies/integrate" - } - ] - }, - { - "title": "Consul Tools", - "path": "download-tools" - }, - { - "title": "Upgrade", - "routes": [ - { - "title": "Overview", - "path": "upgrading" - }, - { - "title": "Compatibility Promise", - "path": "upgrading/compatibility" - }, - { - "title": "Specific Version Details", - "path": "upgrading/upgrade-specific" - }, - { - "title": "Upgrade Instructions", - "routes": [ - { - "title": "Overview", - "path": "upgrading/instructions" - }, - { - "title": "General Process", - "path": "upgrading/instructions/general-process" - }, - { - "title": "Upgrading to Latest 1.2.x", - "path": "upgrading/instructions/upgrade-to-1-2-x" - }, - { - "title": "Upgrading to Latest 1.6.x", - "path": "upgrading/instructions/upgrade-to-1-6-x" - }, - { - "title": "Upgrading to Latest 1.8.x", - "path": "upgrading/instructions/upgrade-to-1-8-x" - }, - { - "title": "Upgrading to Latest 1.10.x", - "path": "upgrading/instructions/upgrade-to-1-10-x" - } - ] - } - ] - }, - { - "title": "Troubleshoot", - "routes": [ - { - "title": "Common Error Messages", - "path": "troubleshoot/common-errors" - }, - { - "title": "FAQ", - "path": "troubleshoot/faq" - } - ] + "divider": true }, { "title": "Release Notes", @@ -1319,6 +1257,69 @@ } ] }, + { + "title": "Upgrade", + "routes": [ + { + "title": "Overview", + "path": "upgrading" + }, + { + "title": "Compatibility Promise", + "path": "upgrading/compatibility" + }, + { + "title": "Specific Version Details", + "path": "upgrading/upgrade-specific" + }, + { + "title": "Upgrade Instructions", + "routes": [ + { + "title": "Overview", + "path": "upgrading/instructions" + }, + { + "title": "General Process", + "path": "upgrading/instructions/general-process" + }, + { + "title": "Upgrading to Latest 1.2.x", + "path": "upgrading/instructions/upgrade-to-1-2-x" + }, + { + "title": "Upgrading to Latest 1.6.x", + "path": "upgrading/instructions/upgrade-to-1-6-x" + }, + { + "title": "Upgrading to Latest 1.8.x", + "path": 
"upgrading/instructions/upgrade-to-1-8-x" + }, + { + "title": "Upgrading to Latest 1.10.x", + "path": "upgrading/instructions/upgrade-to-1-10-x" + } + ] + } + ] + }, + { + "title": "Troubleshoot", + "routes": [ + { + "title": "Common Error Messages", + "path": "troubleshoot/common-errors" + }, + { + "title": "FAQ", + "path": "troubleshoot/faq" + } + ] + }, + { + "title": "Consul Tools", + "path": "download-tools" + }, { "title": "Internals", "hidden": true, From 9189c115a125f61cd322af3ece29099d8b7283e8 Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:50:03 -0400 Subject: [PATCH 080/104] docs: update k8s vault connect ca config docs - Add namespace to additionalConfig example - Improve the link to additional configuration options available --- website/content/docs/k8s/helm.mdx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index 837a03f56..be0c34080 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -270,14 +270,14 @@ Use these links to navigate to a particular top-level stanza. - `authMethodPath` ((#v-global-secretsbackend-vault-connectca-authmethodpath)) (`string: kubernetes`) - The mount path of the Kubernetes auth method in Vault. - `rootPKIPath` ((#v-global-secretsbackend-vault-connectca-rootpkipath)) (`string: ""`) - The path to a PKI secrets engine for the root certificate. - Please see https://www.consul.io/docs/connect/ca/vault#rootpkipath. + For more details, [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#rootpkipath). - `intermediatePKIPath` ((#v-global-secretsbackend-vault-connectca-intermediatepkipath)) (`string: ""`) - The path to a PKI secrets engine for the generated intermediate certificate. - Please see https://www.consul.io/docs/connect/ca/vault#intermediatepkipath. + For more details, [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#intermediatepkipath). - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional Connect CA configuration in JSON format. - Please see https://www.consul.io/docs/connect/ca/vault#common-ca-config-options - for additional configuration options. + Please refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#configuration) + for all configuration options available for that provider. Example: @@ -286,7 +286,8 @@ Use these links to navigate to a particular top-level stanza. { "connect": [{ "ca_config": [{ - "leaf_cert_ttl": "36h" + "leaf_cert_ttl": "36h", + "namespace": "my-vault-ns" }] }] } From dbeb8a23dcf867ab12964352a0682b56798f6ca6 Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Fri, 29 Jul 2022 18:04:05 -0400 Subject: [PATCH 081/104] docs: link pq docs to relevant DNS lookup section --- website/content/api-docs/query.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/api-docs/query.mdx b/website/content/api-docs/query.mdx index 54a148e9a..4ad4e2e90 100644 --- a/website/content/api-docs/query.mdx +++ b/website/content/api-docs/query.mdx @@ -11,7 +11,7 @@ The `/query` endpoints create, update, destroy, and execute prepared queries. 
Prepared queries allow you to register a complex service query and then execute it later via its ID or name to get a set of healthy nodes that provide a given service. This is particularly useful in combination with Consul's -[DNS Interface](/docs/discovery/dns) as it allows for much richer queries than +[DNS Interface](/docs/discovery/dns#prepared-query-lookups) as it allows for much richer queries than would be possible given the limited entry points exposed by DNS. Check the [Geo Failover tutorial](https://learn.hashicorp.com/tutorials/consul/automate-geo-failover) for details and From 3d45306e1b17a245c8ab1348b4baddf1c566bd26 Mon Sep 17 00:00:00 2001 From: Eric Haberkorn Date: Tue, 23 Aug 2022 09:13:43 -0400 Subject: [PATCH 082/104] Cluster peering failover disco chain changes (#14296) --- agent/connect/sni_test.go | 33 ++- agent/consul/discovery_chain_endpoint_test.go | 17 +- agent/consul/discoverychain/compile.go | 216 +++++++------- agent/consul/discoverychain/compile_test.go | 273 ++++++++++++++---- agent/consul/state/peering_test.go | 8 +- agent/discovery_chain_endpoint_test.go | 27 +- agent/proxycfg/naming.go | 33 ++- agent/proxycfg/naming_test.go | 7 + agent/structs/config_entry_discoverychain.go | 29 ++ agent/structs/discovery_chain.go | 56 +++- agent/xds/failover_math_test.go | 35 ++- 11 files changed, 527 insertions(+), 207 deletions(-) diff --git a/agent/connect/sni_test.go b/agent/connect/sni_test.go index 26fae1da7..59e9f41fc 100644 --- a/agent/connect/sni_test.go +++ b/agent/connect/sni_test.go @@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) { func TestTargetSNI(t *testing.T) { // empty namespace, empty subset require.Equal(t, "api.default.foo."+testTrustDomainSuffix1, - TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain1)) require.Equal(t, "api.default.foo."+testTrustDomainSuffix1, - TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + Datacenter: "foo", + }), testTrustDomain1)) // set namespace, empty subset require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2, - TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + Namespace: "neighbor", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain2)) // empty namespace, set subset require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1, - TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", "foo"), testTrustDomain1)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + ServiceSubset: "v2", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain1)) // set namespace, set subset require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2, - TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + ServiceSubset: "canary", + Namespace: "neighbor", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain2)) } diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 21c34aa86..c1ad0fef3 100644 --- 
a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { return &resp, nil } - newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) + newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + if opts.Namespace == "" { + opts.Namespace = "default" + } + if opts.Partition == "" { + opts.Partition = "default" + } + if opts.Datacenter == "" { + opts.Datacenter = "dc1" + } + t := structs.NewDiscoveryTarget(opts) t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul") t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default @@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}), }, }, } @@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { }, Targets: map[string]*structs.DiscoveryTarget{ "web.default.default.dc1": targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc1"), + newTarget(structs.DiscoveryTargetOpts{Service: "web"}), 33*time.Second, ), }, diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index ed664878b..3a9a1f0ed 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -8,6 +8,7 @@ import ( "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" @@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error { if router == nil { // If no router is configured, move on down the line to the next hop of // the chain. - node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", "")) + node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{ + Service: c.serviceName, + })) + if err != nil { return err } @@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error { ) if dest.ServiceSubset == "" { node, err = c.getSplitterOrResolverNode( - c.newTarget(svc, "", destNamespace, destPartition, ""), - ) + c.newTarget(structs.DiscoveryTargetOpts{ + Service: svc, + Namespace: destNamespace, + Partition: destPartition, + }, + )) } else { node, err = c.getResolverNode( - c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""), + c.newTarget(structs.DiscoveryTargetOpts{ + Service: svc, + ServiceSubset: dest.ServiceSubset, + Namespace: destNamespace, + Partition: destPartition, + }), false, ) } @@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error { // If we have a router, we'll add a catch-all route at the end to send // unmatched traffic to the next hop in the chain. 
- defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), "")) + opts := structs.DiscoveryTargetOpts{ + Service: router.Name, + Namespace: router.NamespaceOrDefault(), + Partition: router.PartitionOrDefault(), + } + defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts)) if err != nil { return err } @@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.S } } -func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget { - if service == "" { +func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + if opts.Service == "" { panic("newTarget called with empty service which makes no sense") } - t := structs.NewDiscoveryTarget( - service, - serviceSubset, - defaultIfEmpty(namespace, c.evaluateInNamespace), - defaultIfEmpty(partition, c.evaluateInPartition), - defaultIfEmpty(datacenter, c.evaluateInDatacenter), - ) + if opts.Peer == "" { + opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter) + opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace) + opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition) + } else { + // Don't allow Peer and Datacenter. + opts.Datacenter = "" + // Peer and Partition cannot both be set. + opts.Partition = acl.PartitionOrDefault("") + // Default to "default" rather than c.evaluateInNamespace. + opts.Namespace = acl.PartitionOrDefault(opts.Namespace) + } - // Set default connect SNI. This will be overridden later if the service - // has an explicit SNI value configured in service-defaults. - t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain) + t := structs.NewDiscoveryTarget(opts) - // Use the same representation for the name. This will NOT be overridden - // later. - t.Name = t.SNI + // We don't have the peer's trust domain yet so we can't construct the SNI. + if opts.Peer == "" { + // Set default connect SNI. This will be overridden later if the service + // has an explicit SNI value configured in service-defaults. + t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain) + + // Use the same representation for the name. This will NOT be overridden + // later. + t.Name = t.SNI + } prev, ok := c.loadedTargets[t.ID] if ok { @@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datac return t } -func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget { - var ( - service2 = t.Service - serviceSubset2 = t.ServiceSubset - partition2 = t.Partition - namespace2 = t.Namespace - datacenter2 = t.Datacenter - ) +func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + mergedOpts := t.ToDiscoveryTargetOpts() - if service != "" && service != service2 { - service2 = service + if opts.Service != "" && opts.Service != mergedOpts.Service { + mergedOpts.Service = opts.Service // Reset the chosen subset if we reference a service other than our own. 
- serviceSubset2 = "" + mergedOpts.ServiceSubset = "" } - if serviceSubset != "" { - serviceSubset2 = serviceSubset + if opts.ServiceSubset != "" { + mergedOpts.ServiceSubset = opts.ServiceSubset } - if partition != "" { - partition2 = partition + if opts.Partition != "" { + mergedOpts.Partition = opts.Partition } - if namespace != "" { - namespace2 = namespace + // Only use explicit Namespace with Peer + if opts.Namespace != "" || opts.Peer != "" { + mergedOpts.Namespace = opts.Namespace } - if datacenter != "" { - datacenter2 = datacenter + if opts.Datacenter != "" { + mergedOpts.Datacenter = opts.Datacenter } + mergedOpts.Peer = opts.Peer - return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2) + return c.newTarget(mergedOpts) } func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) { @@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGra // fall through to group-resolver } - node, err := c.getResolverNode( - c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""), - false, - ) + opts := structs.DiscoveryTargetOpts{ + Service: splitID.ID, + ServiceSubset: split.ServiceSubset, + Namespace: splitID.NamespaceOrDefault(), + Partition: splitID.PartitionOrDefault(), + } + node, err := c.getResolverNode(c.newTarget(opts), false) if err != nil { return nil, err } @@ -881,11 +908,7 @@ RESOLVE_AGAIN: redirectedTarget := c.rewriteTarget( target, - redirect.Service, - redirect.ServiceSubset, - redirect.Partition, - redirect.Namespace, - redirect.Datacenter, + redirect.ToDiscoveryTargetOpts(), ) if redirectedTarget.ID != target.ID { target = redirectedTarget @@ -895,14 +918,9 @@ RESOLVE_AGAIN: // Handle default subset. if target.ServiceSubset == "" && resolver.DefaultSubset != "" { - target = c.rewriteTarget( - target, - "", - resolver.DefaultSubset, - "", - "", - "", - ) + target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{ + ServiceSubset: resolver.DefaultSubset, + }) goto RESOLVE_AGAIN } @@ -1027,56 +1045,54 @@ RESOLVE_AGAIN: failover, ok = f["*"] } - if ok { - // Determine which failover definitions apply. - var failoverTargets []*structs.DiscoveryTarget - if len(failover.Datacenters) > 0 { - for _, dc := range failover.Datacenters { - // Rewrite the target as per the failover policy. - failoverTarget := c.rewriteTarget( - target, - failover.Service, - failover.ServiceSubset, - target.Partition, - failover.Namespace, - dc, - ) - if failoverTarget.ID != target.ID { // don't failover to yourself - failoverTargets = append(failoverTargets, failoverTarget) - } - } - } else { + if !ok { + return node, nil + } + + // Determine which failover definitions apply. + var failoverTargets []*structs.DiscoveryTarget + if len(failover.Datacenters) > 0 { + opts := failover.ToDiscoveryTargetOpts() + for _, dc := range failover.Datacenters { // Rewrite the target as per the failover policy. - failoverTarget := c.rewriteTarget( - target, - failover.Service, - failover.ServiceSubset, - target.Partition, - failover.Namespace, - "", - ) + opts.Datacenter = dc + failoverTarget := c.rewriteTarget(target, opts) if failoverTarget.ID != target.ID { // don't failover to yourself failoverTargets = append(failoverTargets, failoverTarget) } } - - // If we filtered everything out then no point in having a failover. 
- if len(failoverTargets) > 0 { - df := &structs.DiscoveryFailover{} - node.Resolver.Failover = df - - // Take care of doing any redirects or configuration loading - // related to targets by cheating a bit and recursing into - // ourselves. - for _, target := range failoverTargets { - failoverResolveNode, err := c.getResolverNode(target, true) - if err != nil { - return nil, err - } - failoverTarget := failoverResolveNode.Resolver.Target - df.Targets = append(df.Targets, failoverTarget) + } else if len(failover.Targets) > 0 { + for _, t := range failover.Targets { + // Rewrite the target as per the failover policy. + failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts()) + if failoverTarget.ID != target.ID { // don't failover to yourself + failoverTargets = append(failoverTargets, failoverTarget) } } + } else { + // Rewrite the target as per the failover policy. + failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts()) + if failoverTarget.ID != target.ID { // don't failover to yourself + failoverTargets = append(failoverTargets, failoverTarget) + } + } + + // If we filtered everything out then no point in having a failover. + if len(failoverTargets) > 0 { + df := &structs.DiscoveryFailover{} + node.Resolver.Failover = df + + // Take care of doing any redirects or configuration loading + // related to targets by cheating a bit and recursing into + // ourselves. + for _, target := range failoverTargets { + failoverResolveNode, err := c.getResolverNode(target, true) + if err != nil { + return nil, err + } + failoverTarget := failoverResolveNode.Resolver.Target + df.Targets = append(df.Targets, failoverTarget) + } } } diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 221ac757f..6505fdb9e 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -46,6 +46,7 @@ func TestCompile(t *testing.T) { "service and subset failover": testcase_ServiceAndSubsetFailover(), "datacenter failover": testcase_DatacenterFailover(), "datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(), + "target failover": testcase_Failover_Targets(), "noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(), "resolver with default subset": testcase_Resolve_WithDefaultSubset(), "default resolver with external sni": testcase_DefaultResolver_ExternalSNI(), @@ -182,7 +183,7 @@ func testcase_JustRouterWithDefaults() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -244,7 +245,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -294,7 +295,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc1": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc1", nil), + newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), 33*time.Second, ), }, @@ -361,7 +362,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase }, }, Targets: 
map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -426,7 +427,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc1", + }, nil), }, } @@ -498,7 +502,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc1": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc1", nil), + newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), 33*time.Second, ), }, @@ -584,8 +588,11 @@ func testcase_RouteBypassesSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "bypass.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "other", + ServiceSubset: "bypass", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == bypass", } @@ -638,7 +645,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -694,7 +701,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc1": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc1", nil), + newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), 33*time.Second, ), }, @@ -776,12 +783,19 @@ func testcase_SubsetSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } }), - "v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v1", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 1", } @@ -855,8 +869,8 @@ func testcase_ServiceSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil), - "bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil), + "foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil), + "bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: 
"bar"}, nil), }, } @@ -935,7 +949,10 @@ func testcase_SplitBypassesSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "next", + ServiceSubset: "bypassed", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == bypass", } @@ -973,7 +990,7 @@ func testcase_ServiceRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil), }, } @@ -1019,7 +1036,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "other", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } @@ -1055,7 +1075,10 @@ func testcase_DatacenterRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil), + "main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc9", + }, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1095,7 +1118,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) { + "main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc9", + }, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } @@ -1134,8 +1160,8 @@ func testcase_ServiceFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1177,8 +1203,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1220,8 +1246,8 @@ func testcase_Resolver_CircularFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", 
"dc1", nil), - "backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1261,8 +1287,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "backup", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == backup", } @@ -1301,9 +1330,15 @@ func testcase_DatacenterFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil), - "main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc2", + }, nil), + "main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc4", + }, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1350,17 +1385,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } }), - "main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) { + "main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc2", + }, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } }), - "main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) { + "main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc4", + }, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + }, + } + return compileTestCase{entries: entries, expect: expect} +} + +func testcase_Failover_Targets() compileTestCase { + entries := newEntries() + + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ + Kind: structs.ProxyDefaults, + Name: structs.ProxyConfigGlobal, + MeshGateway: structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + }, + }) + + entries.AddResolvers( + &structs.ServiceResolverConfigEntry{ + Kind: "service-resolver", + Name: "main", + Failover: map[string]structs.ServiceResolverFailover{ + "*": { + 
Targets: []structs.ServiceResolverFailoverTarget{ + {Datacenter: "dc3"}, + {Service: "new-main"}, + {Peer: "cluster-01"}, + }, + }, + }, + }, + ) + + expect := &structs.CompiledDiscoveryChain{ + Protocol: "tcp", + StartNode: "resolver:main.default.default.dc1", + Nodes: map[string]*structs.DiscoveryGraphNode{ + "resolver:main.default.default.dc1": { + Type: structs.DiscoveryGraphNodeTypeResolver, + Name: "main.default.default.dc1", + Resolver: &structs.DiscoveryResolver{ + ConnectTimeout: 5 * time.Second, + Target: "main.default.default.dc1", + Failover: &structs.DiscoveryFailover{ + Targets: []string{ + "main.default.default.dc3", + "new-main.default.default.dc1", + "main.default.default.external.cluster-01", + }, + }, + }, + }, + }, + Targets: map[string]*structs.DiscoveryTarget{ + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + "main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc3", + }, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + "new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + "main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Peer: "cluster-01", + }, func(t *structs.DiscoveryTarget) { + t.SNI = "" + t.Name = "" + t.Datacenter = "" t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } @@ -1422,7 +1545,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } @@ -1452,7 +1578,7 @@ func testcase_DefaultResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1488,7 +1614,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } @@ -1530,7 +1656,7 @@ func testcase_ServiceMetaProjection() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -1588,7 +1714,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase { }, }, Targets: 
map[string]*structs.DiscoveryTarget{ - "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil), }, } @@ -1623,7 +1749,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil), }, } @@ -1658,7 +1784,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } @@ -1692,7 +1821,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { t.SNI = "main.some.other.service.mesh" t.External = true }), @@ -1857,11 +1986,17 @@ func testcase_MultiDatacenterCanary() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc2": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc2", nil), + newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc2", + }, nil), 33*time.Second, ), "main.default.default.dc3": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc3", nil), + newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc3", + }, nil), 33*time.Second, ), }, @@ -2155,27 +2290,42 @@ func testcase_AllBellsAndWhistles() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "redirected", + ServiceSubset: "prod", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "ServiceMeta.env == prod", } }), - "v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v1", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 1", } }), - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } }), - "v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v3", + }, 
func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 3", } }), - "default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "default-subset", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{OnlyPassing: true} }), }, @@ -2379,7 +2529,7 @@ func testcase_ResolverProtocolOverride() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect, @@ -2413,7 +2563,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect, @@ -2451,7 +2601,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect, @@ -2685,9 +2835,9 @@ func testcase_LBSplitterAndResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil), - "bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil), - "baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil), + "foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil), + "bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil), + "baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil), }, } @@ -2743,7 +2893,7 @@ func testcase_LBResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -2791,8 +2941,17 @@ func newEntries() *configentry.DiscoveryChainSet { } } -func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) +func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget { + if opts.Namespace == "" { + opts.Namespace = "default" + } + if opts.Partition == "" { + opts.Partition = "default" + } + if opts.Datacenter == "" { + opts.Datacenter = "dc1" + } + t := structs.NewDiscoveryTarget(opts) t.SNI = connect.TargetSNI(t, "trustdomain.consul") t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go index b48e4f80d..bfce75295 100644 --- 
a/agent/consul/state/peering_test.go +++ b/agent/consul/state/peering_test.go @@ -1461,7 +1461,13 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) { } newTarget := func(service, serviceSubset, datacenter string) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, "default", "default", datacenter) + t := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: service, + ServiceSubset: serviceSubset, + Partition: "default", + Namespace: "default", + Datacenter: datacenter, + }) t.SNI = connect.TargetSNI(t, connect.TestTrustDomain) t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default diff --git a/agent/discovery_chain_endpoint_test.go b/agent/discovery_chain_endpoint_test.go index 8b4a7e272..42c082591 100644 --- a/agent/discovery_chain_endpoint_test.go +++ b/agent/discovery_chain_endpoint_test.go @@ -27,8 +27,17 @@ func TestDiscoveryChainRead(t *testing.T) { defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") - newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) + newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + if opts.Namespace == "" { + opts.Namespace = "default" + } + if opts.Partition == "" { + opts.Partition = "default" + } + if opts.Datacenter == "" { + opts.Datacenter = "dc1" + } + t := structs.NewDiscoveryTarget(opts) t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul") t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default @@ -99,7 +108,7 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}), }, } require.Equal(t, expect, value.Chain) @@ -144,7 +153,7 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"), + "web.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}), }, } require.Equal(t, expect, value.Chain) @@ -198,7 +207,7 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}), }, } require.Equal(t, expect, value.Chain) @@ -264,11 +273,11 @@ func TestDiscoveryChainRead(t *testing.T) { }, Targets: map[string]*structs.DiscoveryTarget{ "web.default.default.dc1": targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc1"), + newTarget(structs.DiscoveryTargetOpts{Service: "web"}), 33*time.Second, ), "web.default.default.dc2": targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc2"), + newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}), 33*time.Second, ), }, @@ -280,7 +289,7 @@ func TestDiscoveryChainRead(t *testing.T) { })) expectTarget_DC1 := targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc1"), + newTarget(structs.DiscoveryTargetOpts{Service: "web"}), 22*time.Second, ) expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{ @@ -288,7 +297,7 @@ func TestDiscoveryChainRead(t *testing.T) { } expectTarget_DC2 := targetWithConnectTimeout( - newTarget("web", "", 
"default", "default", "dc2"), + newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}), 22*time.Second, ) expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{ diff --git a/agent/proxycfg/naming.go b/agent/proxycfg/naming.go index 3bb0854b0..08ff216ed 100644 --- a/agent/proxycfg/naming.go +++ b/agent/proxycfg/naming.go @@ -63,22 +63,29 @@ func NewUpstreamIDFromServiceID(sid structs.ServiceID) UpstreamID { return id } -// TODO(peering): confirm we don't need peername here func NewUpstreamIDFromTargetID(tid string) UpstreamID { - // Drop the leading subset if one is present in the target ID. - separators := strings.Count(tid, ".") - if separators > 3 { - prefix := tid[:strings.Index(tid, ".")+1] - tid = strings.TrimPrefix(tid, prefix) + var id UpstreamID + split := strings.Split(tid, ".") + + switch { + case split[len(split)-2] == "external": + id = UpstreamID{ + Name: split[0], + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), + Peer: split[4], + } + case len(split) == 5: + // Drop the leading subset if one is present in the target ID. + split = split[1:] + fallthrough + default: + id = UpstreamID{ + Name: split[0], + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), + Datacenter: split[3], + } } - split := strings.SplitN(tid, ".", 4) - - id := UpstreamID{ - Name: split[0], - EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), - Datacenter: split[3], - } id.normalize() return id } diff --git a/agent/proxycfg/naming_test.go b/agent/proxycfg/naming_test.go index 23ff24165..2c4f5173a 100644 --- a/agent/proxycfg/naming_test.go +++ b/agent/proxycfg/naming_test.go @@ -35,6 +35,13 @@ func TestUpstreamIDFromTargetID(t *testing.T) { Datacenter: "dc2", }, }, + "peered": { + tid: "foo.default.default.external.cluster-01", + expect: UpstreamID{ + Name: "foo", + Peer: "cluster-01", + }, + }, } for name, tc := range cases { diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 8bc0305b0..0ea260955 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -1233,6 +1233,16 @@ type ServiceResolverRedirect struct { Datacenter string `json:",omitempty"` } +func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: r.Service, + ServiceSubset: r.ServiceSubset, + Namespace: r.Namespace, + Partition: r.Partition, + Datacenter: r.Datacenter, + } +} + // There are some restrictions on what is allowed in here: // // - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be @@ -1275,6 +1285,14 @@ type ServiceResolverFailover struct { Targets []ServiceResolverFailoverTarget `json:",omitempty"` } +func (t *ServiceResolverFailover) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: t.Service, + ServiceSubset: t.ServiceSubset, + Namespace: t.Namespace, + } +} + func (f *ServiceResolverFailover) isEmpty() bool { return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 && len(f.Targets) == 0 } @@ -1299,6 +1317,17 @@ type ServiceResolverFailoverTarget struct { Peer string `json:",omitempty"` } +func (t *ServiceResolverFailoverTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: t.Service, + ServiceSubset: t.ServiceSubset, + Namespace: t.Namespace, + Partition: t.Partition, + Datacenter: t.Datacenter, + Peer: t.Peer, + } +} + // 
LoadBalancer determines the load balancing policy and configuration for services // issuing requests to this upstream service. type LoadBalancer struct { diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index 2bbe88f9e..ca64d070d 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -56,7 +56,12 @@ type CompiledDiscoveryChain struct { // ID returns an ID that encodes the service, namespace, partition, and datacenter. // This ID allows us to compare a discovery chain target to the chain upstream itself. func (c *CompiledDiscoveryChain) ID() string { - return chainID("", c.ServiceName, c.Namespace, c.Partition, c.Datacenter) + return chainID(DiscoveryTargetOpts{ + Service: c.ServiceName, + Namespace: c.Namespace, + Partition: c.Partition, + Datacenter: c.Datacenter, + }) } func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName { @@ -185,6 +190,7 @@ type DiscoveryTarget struct { Namespace string `json:",omitempty"` Partition string `json:",omitempty"` Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` MeshGateway MeshGatewayConfig `json:",omitempty"` Subset ServiceResolverSubset `json:",omitempty"` @@ -240,28 +246,52 @@ func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error { return nil } -func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget { +type DiscoveryTargetOpts struct { + Service string + ServiceSubset string + Namespace string + Partition string + Datacenter string + Peer string +} + +func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget { t := &DiscoveryTarget{ - Service: service, - ServiceSubset: serviceSubset, - Namespace: namespace, - Partition: partition, - Datacenter: datacenter, + Service: opts.Service, + ServiceSubset: opts.ServiceSubset, + Namespace: opts.Namespace, + Partition: opts.Partition, + Datacenter: opts.Datacenter, + Peer: opts.Peer, } t.setID() return t } -func chainID(subset, service, namespace, partition, dc string) string { - // NOTE: this format is similar to the SNI syntax for simplicity - if subset == "" { - return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, dc) +func (t *DiscoveryTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: t.Service, + ServiceSubset: t.ServiceSubset, + Namespace: t.Namespace, + Partition: t.Partition, + Datacenter: t.Datacenter, + Peer: t.Peer, } - return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, dc) +} + +func chainID(opts DiscoveryTargetOpts) string { + // NOTE: this format is similar to the SNI syntax for simplicity + if opts.Peer != "" { + return fmt.Sprintf("%s.%s.default.external.%s", opts.Service, opts.Namespace, opts.Peer) + } + if opts.ServiceSubset == "" { + return fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter) + } + return fmt.Sprintf("%s.%s.%s.%s.%s", opts.ServiceSubset, opts.Service, opts.Namespace, opts.Partition, opts.Datacenter) } func (t *DiscoveryTarget) setID() { - t.ID = chainID(t.ServiceSubset, t.Service, t.Namespace, t.Partition, t.Datacenter) + t.ID = chainID(t.ToDiscoveryTargetOpts()) } func (t *DiscoveryTarget) String() string { diff --git a/agent/xds/failover_math_test.go b/agent/xds/failover_math_test.go index 29ac17ffe..296d1cc77 100644 --- a/agent/xds/failover_math_test.go +++ b/agent/xds/failover_math_test.go @@ -15,15 +15,40 @@ func TestFirstHealthyTarget(t *testing.T) { warning := 
proxycfg.TestUpstreamNodesInStatus(t, "warning") critical := proxycfg.TestUpstreamNodesInStatus(t, "critical") - warnOnlyPassingTarget := structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1") + warnOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-warn", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }) warnOnlyPassingTarget.Subset.OnlyPassing = true - failOnlyPassingTarget := structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1") + failOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-fail", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }) failOnlyPassingTarget.Subset.OnlyPassing = true targets := map[string]*structs.DiscoveryTarget{ - "all-ok.default.dc1": structs.NewDiscoveryTarget("all-ok", "", "default", "default", "dc1"), - "all-warn.default.dc1": structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1"), - "all-fail.default.default.dc1": structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1"), + "all-ok.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-ok", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }), + "all-warn.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-warn", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }), + "all-fail.default.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-fail", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }), "all-warn-onlypassing.default.dc1": warnOnlyPassingTarget, "all-fail-onlypassing.default.dc1": failOnlyPassingTarget, } From 00951602b0b29d33bda8130a3b30180214e2dcb3 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Tue, 2 Aug 2022 06:30:06 -0700 Subject: [PATCH 083/104] docs: improve consistency of DNS lookup variables Previously, some variables were wrapped in < > while others were not, creating ambiguity in whether some labels were a string literal or a variable. Now, all variables are wrapped in < >. --- website/content/docs/discovery/dns.mdx | 69 +++++++++++++++----------- 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index f08a43c6e..b50e8deee 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -52,7 +52,7 @@ There are fundamentally two types of queries: node lookups and service lookups. A node lookup, a simple query for the address of a named node, looks like this: ```text -.node[.datacenter]. +.node[.]. ``` For example, if we have a `foo` node with default settings, we could @@ -79,16 +79,16 @@ $ dig @127.0.0.1 -p 8600 foo.node.consul ANY ;; WARNING: recursion requested but not available ;; QUESTION SECTION: -;foo.node.consul. IN ANY +;foo.node.consul. IN ANY ;; ANSWER SECTION: -foo.node.consul. 0 IN A 10.1.10.12 -foo.node.consul. 0 IN TXT "meta_key=meta_value" -foo.node.consul. 0 IN TXT "value only" +foo.node.consul. 0 IN A 10.1.10.12 +foo.node.consul. 0 IN TXT "meta_key=meta_value" +foo.node.consul. 0 IN TXT "value only" ;; AUTHORITY SECTION: -consul. 0 IN SOA ns.consul. postmaster.consul. 1392836399 3600 600 86400 0 +consul. 0 IN SOA ns.consul. postmaster.consul. 
1392836399 3600 600 86400 0 ``` By default the TXT records value will match the node's metadata key-value @@ -121,7 +121,7 @@ it is recommended to use the HTTP API to retrieve the list of nodes. The format of a standard service lookup is: ```text -[tag.].service[.datacenter]. +[.].service[.]. ``` The `tag` is optional, and, as with node lookups, the `datacenter` is as @@ -157,26 +157,37 @@ $ dig @127.0.0.1 -p 8600 consul.service.consul SRV ;; WARNING: recursion requested but not available ;; QUESTION SECTION: -;consul.service.consul. IN SRV +;consul.service.consul. IN SRV ;; ANSWER SECTION: -consul.service.consul. 0 IN SRV 1 1 8300 foobar.node.dc1.consul. +consul.service.consul. 0 IN SRV 1 1 8300 foobar.node.dc1.consul. ;; ADDITIONAL SECTION: -foobar.node.dc1.consul. 0 IN A 10.1.10.12 +foobar.node.dc1.consul. 0 IN A 10.1.10.12 ``` ### RFC 2782 Lookup -The format for RFC 2782 SRV lookups is: +Valid formats for RFC 2782 SRV lookups depend on +whether you want to filter results based on a service tag: - _._[.service][.datacenter][.domain] +- No filtering on service tag -Per [RFC 2782](https://tools.ietf.org/html/rfc2782), SRV queries should use -underscores, `_`, as a prefix to the `service` and `protocol` values in a query to -prevent DNS collisions. The `protocol` value can be any of the tags for a -service. If the service has no tags, `tcp` should be used. If `tcp` -is specified as the protocol, the query will not perform any tag filtering. + ```text + _._tcp[.service][.]. + ``` + +- Filtering on service tag specified in the RFC 2782 protocol field + + ```text + _._[.service][.]. + ``` + +Per [RFC 2782](https://tools.ietf.org/html/rfc2782), SRV queries must +prepend an underscore (`_`) to the `service` and `protocol` values in a query to +prevent DNS collisions. +To perform no tag-based filtering, specify `tcp` in the RFC 2782 protocol field. +To filter results on a service tag, specify the tag in the RFC 2782 protocol field. Other than the query format and default `tcp` protocol/tag value, the behavior of the RFC style lookup is the same as the standard style of lookup. @@ -196,13 +207,13 @@ $ dig @127.0.0.1 -p 8600 _rabbitmq._amqp.service.consul SRV ;; WARNING: recursion requested but not available ;; QUESTION SECTION: -;_rabbitmq._amqp.service.consul. IN SRV +;_rabbitmq._amqp.service.consul. IN SRV ;; ANSWER SECTION: -_rabbitmq._amqp.service.consul. 0 IN SRV 1 1 5672 rabbitmq.node1.dc1.consul. +_rabbitmq._amqp.service.consul. 0 IN SRV 1 1 5672 rabbitmq.node1.dc1.consul. ;; ADDITIONAL SECTION: -rabbitmq.node1.dc1.consul. 0 IN A 10.1.11.20 +rabbitmq.node1.dc1.consul. 0 IN A 10.1.11.20 ``` Again, note that the SRV record returns the port of the service as well as its IP. @@ -328,7 +339,7 @@ $ echo -n "20010db800010002cafe000000001337" | perl -ne 'printf join(":", unpack The format of a prepared query lookup is: ```text -.query[.datacenter]. +.query[.]. ``` The `datacenter` is optional, and if not provided, the datacenter of this Consul @@ -376,7 +387,7 @@ If you need more complex behavior, please use the To find the unique virtual IP allocated for a service: ```text -.virtual[.peer]. +.virtual[.]. ``` This will return the unique virtual IP for any [Connect-capable](/docs/connect) @@ -439,14 +450,14 @@ The following responses are returned: ``` ;; QUESTION SECTION: -;consul.service.test-domain. IN SRV +;consul.service.test-domain. IN SRV ;; ANSWER SECTION: -consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain. +consul.service.test-domain. 
0 IN SRV 1 1 8300 machine.node.dc1.test-domain. ;; ADDITIONAL SECTION: -machine.node.dc1.test-domain. 0 IN A 127.0.0.1 -machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" +machine.node.dc1.test-domain. 0 IN A 127.0.0.1 +machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" ``` -> **PTR queries:** Responses to PTR queries (`.in-addr.arpa.`) will always use the @@ -479,7 +490,7 @@ resolve services within the `default` namespace and partition. However, for reso services from other namespaces or partitions the following form can be used: ```text -[tag.].service..ns..ap..dc. +[.].service..ns..ap..dc. ``` This sequence is the canonical naming convention of a Consul Enterprise service. At least two of the following @@ -491,14 +502,14 @@ fields must be present: For imported lookups, only the namespace and peer need to be specified as the partition can be inferred from the peering: ```text -.virtual[.namespace][.peer]. +.virtual[.].. ``` For node lookups, only the partition and datacenter need to be specified as nodes cannot be namespaced. ```text -[tag.].node..ap..dc. +[.].node..ap..dc. ``` ## DNS with ACLs From 7a3c20ce645087d6769d65b8a84955289c20f842 Mon Sep 17 00:00:00 2001 From: Tyler Wendlandt Date: Tue, 23 Aug 2022 13:02:40 -0600 Subject: [PATCH 084/104] ui: Update badge / pill icon sizing (#14282) * Update badge icon sizing to be 16x16 * Update icon sizing in pill component --- .../app/components/consul/external-source/index.scss | 9 +++++++-- .../consul-ui/app/components/consul/kind/index.scss | 1 + ui/packages/consul-ui/app/components/pill/index.scss | 3 +++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/ui/packages/consul-ui/app/components/consul/external-source/index.scss b/ui/packages/consul-ui/app/components/consul/external-source/index.scss index b05acb45b..b876b48fd 100644 --- a/ui/packages/consul-ui/app/components/consul/external-source/index.scss +++ b/ui/packages/consul-ui/app/components/consul/external-source/index.scss @@ -1,6 +1,11 @@ .consul-external-source { @extend %pill-200, %frame-gray-600, %p1; } + +.consul-external-source::before { + --icon-size: icon-300; +} + .consul-external-source.kubernetes::before { @extend %with-logo-kubernetes-color-icon, %as-pseudo; } @@ -15,10 +20,10 @@ @extend %with-logo-consul-color-icon, %as-pseudo; } .consul-external-source.vault::before { - @extend %with-vault-100; + @extend %with-vault-300; } .consul-external-source.aws::before { - @extend %with-aws-100; + @extend %with-aws-300; } .consul-external-source.leader::before { @extend %with-star-outline-mask, %as-pseudo; diff --git a/ui/packages/consul-ui/app/components/consul/kind/index.scss b/ui/packages/consul-ui/app/components/consul/kind/index.scss index 7467195f2..0431ac306 100644 --- a/ui/packages/consul-ui/app/components/consul/kind/index.scss +++ b/ui/packages/consul-ui/app/components/consul/kind/index.scss @@ -3,4 +3,5 @@ } .consul-kind::before { @extend %with-gateway-mask, %as-pseudo; + --icon-size: icon-300; } diff --git a/ui/packages/consul-ui/app/components/pill/index.scss b/ui/packages/consul-ui/app/components/pill/index.scss index d08626db8..c528bd9ff 100644 --- a/ui/packages/consul-ui/app/components/pill/index.scss +++ b/ui/packages/consul-ui/app/components/pill/index.scss @@ -18,6 +18,9 @@ span.policy-node-identity::before { span.policy-service-identity::before { content: 'Service Identity: '; } +%pill::before { + --icon-size: icon-300; +} %pill.leader::before { @extend %with-star-outline-mask, %as-pseudo; } From 
1e9cb26b65103dca3c6cf631099243766b10a168 Mon Sep 17 00:00:00 2001 From: Ashwin Venkatesh Date: Tue, 23 Aug 2022 15:14:36 -0400 Subject: [PATCH 085/104] Updates docs for CRDs (#14267) Co-authored-by: NicoletaPopoviciu --- .../docs/connect/config-entries/ingress-gateway.mdx | 6 ------ website/content/docs/connect/config-entries/mesh.mdx | 11 +---------- .../docs/connect/config-entries/proxy-defaults.mdx | 8 +++----- .../docs/connect/config-entries/service-defaults.mdx | 2 -- 4 files changed, 4 insertions(+), 23 deletions(-) diff --git a/website/content/docs/connect/config-entries/ingress-gateway.mdx b/website/content/docs/connect/config-entries/ingress-gateway.mdx index 78773188d..fa95c5b19 100644 --- a/website/content/docs/connect/config-entries/ingress-gateway.mdx +++ b/website/content/docs/connect/config-entries/ingress-gateway.mdx @@ -991,14 +991,12 @@ You can specify the following parameters to configure ingress gateway configurat }, { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: "Set the default minimum TLS version supported for the gateway's listeners. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: { hcl: @@ -1009,7 +1007,6 @@ You can specify the following parameters to configure ingress gateway configurat }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the default list of TLS cipher suites for the gateway's listeners to support when negotiating connections using @@ -1179,21 +1176,18 @@ You can specify the following parameters to configure ingress gateway configurat }, { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: 'Set the minimum TLS version supported for this listener. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.', }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: 'Set the maximum TLS version supported for this listener. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`.', }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the list of TLS cipher suites to support when negotiating connections using TLS 1.2 or earlier. If unspecified, diff --git a/website/content/docs/connect/config-entries/mesh.mdx b/website/content/docs/connect/config-entries/mesh.mdx index 8c9f3e718..e8d6b4de5 100644 --- a/website/content/docs/connect/config-entries/mesh.mdx +++ b/website/content/docs/connect/config-entries/mesh.mdx @@ -271,7 +271,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'Incoming', - yaml: false, type: 'TLSDirectionConfig: ', description: `TLS configuration for inbound mTLS connections targeting the public listener on \`connect-proxy\` and \`terminating-gateway\` @@ -279,14 +278,12 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. 
If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: { hcl: @@ -297,7 +294,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the default list of TLS cipher suites to support when negotiating connections using @@ -315,7 +311,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura }, { name: 'Outgoing', - yaml: false, type: 'TLSDirectionConfig: ', description: `TLS configuration for outbound mTLS connections dialing upstreams from \`connect-proxy\` and \`ingress-gateway\` @@ -323,14 +318,12 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: { hcl: @@ -341,7 +334,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the default list of TLS cipher suites to support when negotiating connections using @@ -366,9 +358,8 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'SanitizeXForwardedClientCert', - yaml: false, type: 'bool: ', - description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\` + description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\` for all Envoy proxies. As a result, Consul will not include the \`x-forwarded-client-cert\` header in the next hop. If set to \`false\` (default), the XFCC header is propagated to upstream applications.`, }, diff --git a/website/content/docs/connect/config-entries/proxy-defaults.mdx b/website/content/docs/connect/config-entries/proxy-defaults.mdx index 3be5c850b..c6f82d783 100644 --- a/website/content/docs/connect/config-entries/proxy-defaults.mdx +++ b/website/content/docs/connect/config-entries/proxy-defaults.mdx @@ -10,7 +10,7 @@ description: >- # Proxy Defaults -The `proxy-defaults` configuration entry (`ProxyDefaults` on Kubernetes) allows you +The `proxy-defaults` configuration entry (`ProxyDefaults` on Kubernetes) allows you to configure global defaults across all services for Connect proxy configurations. Only one global entry is supported. @@ -28,8 +28,8 @@ service definitions](/docs/connect/registration/sidecar-service). ## Requirements The following Consul binaries are supported: -* Consul 1.8.4+ on Kubernetes. -* Consul 1.5.0+ on other platforms. +* Consul 1.8.4+ on Kubernetes. +* Consul 1.5.0+ on other platforms. ## Usage @@ -321,7 +321,6 @@ spec: \`direct\` represents that the proxy's listeners must be dialed directly by the local application and other proxies. Added in v1.10.0.`, - yaml: false, }, { name: 'TransparentProxy', @@ -333,7 +332,6 @@ spec: type: 'int: "15001"', description: `The port the proxy should listen on for outbound traffic. 
This must be the port where outbound application traffic is captured and redirected to.`, - yaml: false, }, { name: 'DialedDirectly', diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index 54aabfe8e..b431e4345 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -366,7 +366,6 @@ represents a location outside the Consul cluster. They can be dialed directly wh \`direct\` represents that the proxy's listeners must be dialed directly by the local application and other proxies. Added in v1.10.0.`, - yaml: false, }, { name: 'UpstreamConfig', @@ -652,7 +651,6 @@ represents a location outside the Consul cluster. They can be dialed directly wh type: 'int: "15001"', description: `The port the proxy should listen on for outbound traffic. This must be the port where outbound application traffic is redirected to.`, - yaml: false, }, { name: 'DialedDirectly', From 1cd7ec05437ef1872c576ecd2be593dd753e1802 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Thu, 11 Aug 2022 10:19:36 +0100 Subject: [PATCH 086/104] proxycfg: terminate stream on irrecoverable errors This is the OSS portion of enterprise PR 2339. It improves our handling of "irrecoverable" errors in proxycfg data sources. The canonical example of this is what happens when the ACL token presented by Envoy is deleted/revoked. Previously, the stream would get "stuck" until the xDS server re-checked the token (after 5 minutes) and terminated the stream. Materializers would also sit burning resources retrying something that could never succeed. Now, it is possible for data sources to mark errors as "terminal" which causes the xDS stream to be closed immediately. Similarly, the submatview.Store will evict materializers when it observes they have encountered such an error. --- agent/proxycfg-glue/glue.go | 20 +++--- agent/proxycfg-glue/intention_upstreams.go | 7 +-- agent/proxycfg-glue/intentions.go | 17 ++--- agent/proxycfg/data_sources.go | 23 +++++++ agent/proxycfg/manager.go | 39 +++++++++--- agent/proxycfg/state.go | 24 +++++++- agent/submatview/local_materializer.go | 12 ++++ agent/submatview/store.go | 60 +++++++++++++++++- agent/submatview/store_test.go | 72 ++++++++++++++++++++++ agent/xds/delta.go | 18 +++++- agent/xds/server.go | 22 ++++--- 11 files changed, 267 insertions(+), 47 deletions(-) diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 86badf67e..1b22b02bd 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -124,15 +124,21 @@ func (c *cacheProxyDataSource[ReqType]) Notify( func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback { return func(ctx context.Context, e cache.UpdateEvent) { - u := proxycfg.UpdateEvent{ - CorrelationID: e.CorrelationID, - Result: e.Result, - Err: e.Err, - } - select { - case ch <- u: + case ch <- newUpdateEvent(e.CorrelationID, e.Result, e.Err): case <-ctx.Done(): } } } + +func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent { + // This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError. 
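In practice this translation means an ACL "not found" failure from any cache-backed data source reaches the proxy state machinery already marked as unrecoverable. A minimal sketch of the intended behaviour, written as if from a test alongside this file (the use of the `acl.ErrNotFound` sentinel, and the test placement, are assumptions made for illustration):

```go
package proxycfgglue

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/proxycfg"
)

// Example_terminalACLError is illustrative only.
func Example_terminalACLError() {
	ev := newUpdateEvent("intentions", nil, acl.ErrNotFound)

	// The error comes back wrapped as terminal, so the proxycfg state loop
	// tears the watch down and the xDS stream is closed rather than retried.
	fmt.Println(proxycfg.IsTerminalError(ev.Err))
	// Output: true
}
```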
+ if acl.IsErrNotFound(err) { + err = proxycfg.TerminalError(err) + } + return proxycfg.UpdateEvent{ + CorrelationID: correlationID, + Result: result, + Err: err, + } +} diff --git a/agent/proxycfg-glue/intention_upstreams.go b/agent/proxycfg-glue/intention_upstreams.go index 186d91b35..a694d033b 100644 --- a/agent/proxycfg-glue/intention_upstreams.go +++ b/agent/proxycfg-glue/intention_upstreams.go @@ -54,13 +54,8 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) { return func(ctx context.Context, correlationID string, result ResultType, err error) { - event := proxycfg.UpdateEvent{ - CorrelationID: correlationID, - Result: result, - Err: err, - } select { - case ch <- event: + case ch <- newUpdateEvent(correlationID, result, err): case <-ctx.Done(): } } diff --git a/agent/proxycfg-glue/intentions.go b/agent/proxycfg-glue/intentions.go index 57f48bdae..69652d922 100644 --- a/agent/proxycfg-glue/intentions.go +++ b/agent/proxycfg-glue/intentions.go @@ -39,12 +39,8 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token}, } return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) { - e := proxycfg.UpdateEvent{ - CorrelationID: correlationID, - Err: event.Err, - } - - if e.Err == nil { + var result any + if event.Err == nil { rsp, ok := event.Result.(*structs.IndexedIntentionMatches) if !ok { return @@ -54,11 +50,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi if len(rsp.Matches) != 0 { matches = rsp.Matches[0] } - e.Result = matches + result = matches } select { - case ch <- e: + case ch <- newUpdateEvent(correlationID, result, event.Err): case <-ctx.Done(): } }) @@ -110,10 +106,7 @@ func (s *serverIntentions) Notify(ctx context.Context, req *structs.ServiceSpeci sort.Sort(structs.IntentionPrecedenceSorter(intentions)) - return proxycfg.UpdateEvent{ - CorrelationID: correlationID, - Result: intentions, - }, true + return newUpdateEvent(correlationID, intentions, nil), true } for subjectIdx, subject := range subjects { diff --git a/agent/proxycfg/data_sources.go b/agent/proxycfg/data_sources.go index bda0226ff..3649bed2d 100644 --- a/agent/proxycfg/data_sources.go +++ b/agent/proxycfg/data_sources.go @@ -2,6 +2,7 @@ package proxycfg import ( "context" + "errors" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" @@ -15,6 +16,28 @@ type UpdateEvent struct { Err error } +// TerminalError wraps the given error to indicate that the data source is in +// an irrecoverably broken state (e.g. because the given ACL token has been +// deleted). +// +// Setting UpdateEvent.Err to a TerminalError causes all watches to be canceled +// which, in turn, terminates the xDS streams. +func TerminalError(err error) error { + return terminalError{err} +} + +// IsTerminalError returns whether the given error indicates that the data +// source is in an irrecoverably broken state so watches should be torn down +// and retried at a higher level. 
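Because `terminalError` implements `Unwrap`, marking an error terminal does not hide the underlying cause from standard `errors` checks. A minimal sketch of the wrapper's semantics:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/consul/agent/proxycfg"
)

func main() {
	cause := errors.New("ACL not found")
	err := proxycfg.TerminalError(cause)

	fmt.Println(proxycfg.IsTerminalError(err))   // true: watches get torn down
	fmt.Println(errors.Is(err, cause))           // true: the cause is still reachable via Unwrap
	fmt.Println(proxycfg.IsTerminalError(cause)) // false: ordinary errors keep the existing retry behaviour
}
```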
+func IsTerminalError(err error) bool { + return errors.As(err, &terminalError{}) +} + +type terminalError struct{ err error } + +func (e terminalError) Error() string { return e.err.Error() } +func (e terminalError) Unwrap() error { return e.err } + // DataSources contains the dependencies used to consume data used to configure // proxies. type DataSources struct { diff --git a/agent/proxycfg/manager.go b/agent/proxycfg/manager.go index 3de11b3f8..efdfe4b72 100644 --- a/agent/proxycfg/manager.go +++ b/agent/proxycfg/manager.go @@ -127,7 +127,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour } // We are updating the proxy, close its old state - state.Close() + state.Close(false) } // TODO: move to a function that translates ManagerConfig->stateConfig @@ -148,14 +148,13 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour return err } - ch, err := state.Watch() - if err != nil { + if _, err = state.Watch(); err != nil { return err } m.proxies[id] = state // Start a goroutine that will wait for changes and broadcast them to watchers. - go m.notifyBroadcast(ch) + go m.notifyBroadcast(id, state) return nil } @@ -175,8 +174,8 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) { } // Closing state will let the goroutine we started in Register finish since - // watch chan is closed. - state.Close() + // watch chan is closed + state.Close(false) delete(m.proxies, id) // We intentionally leave potential watchers hanging here - there is no new @@ -186,11 +185,17 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) { // cleaned up naturally. } -func (m *Manager) notifyBroadcast(ch <-chan ConfigSnapshot) { - // Run until ch is closed - for snap := range ch { +func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) { + // Run until ch is closed (by a defer in state.run). + for snap := range state.snapCh { m.notify(&snap) } + + // If state.run exited because of an irrecoverable error, close all of the + // watchers so that the consumers reconnect/retry at a higher level. + if state.failed() { + m.closeAllWatchers(proxyID) + } } func (m *Manager) notify(snap *ConfigSnapshot) { @@ -281,6 +286,20 @@ func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) { } } +func (m *Manager) closeAllWatchers(proxyID ProxyID) { + m.mu.Lock() + defer m.mu.Unlock() + + watchers, ok := m.watchers[proxyID] + if !ok { + return + } + + for watchID := range watchers { + m.closeWatchLocked(proxyID, watchID) + } +} + // closeWatchLocked cleans up state related to a single watcher. It assumes the // lock is held. func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) { @@ -309,7 +328,7 @@ func (m *Manager) Close() error { // Then close all states for proxyID, state := range m.proxies { - state.Close() + state.Close(false) delete(m.proxies, proxyID) } return nil diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index 13b22c4fd..34d336435 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "reflect" + "sync/atomic" "time" "github.com/hashicorp/go-hclog" @@ -70,11 +71,21 @@ type state struct { // in Watch. cancel func() + // failedFlag is (atomically) set to 1 (by Close) when run exits because a data + // source is in an irrecoverable state. It can be read with failed. 
+ failedFlag int32 + ch chan UpdateEvent snapCh chan ConfigSnapshot reqCh chan chan *ConfigSnapshot } +// failed returns whether run exited because a data source is in an +// irrecoverable state. +func (s *state) failed() bool { + return atomic.LoadInt32(&s.failedFlag) == 1 +} + type DNSConfig struct { Domain string AltDomain string @@ -250,10 +261,13 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) { } // Close discards the state and stops any long-running watches. -func (s *state) Close() error { +func (s *state) Close(failed bool) error { if s.cancel != nil { s.cancel() } + if failed { + atomic.StoreInt32(&s.failedFlag, 1) + } return nil } @@ -300,7 +314,13 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) { case <-ctx.Done(): return case u := <-s.ch: - s.logger.Trace("A blocking query returned; handling snapshot update", "correlationID", u.CorrelationID) + s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID) + + if IsTerminalError(u.Err) { + s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID) + s.Close(true) + return + } if err := s.handler.handleUpdate(ctx, u, snap); err != nil { s.logger.Error("Failed to handle update from watch", diff --git a/agent/submatview/local_materializer.go b/agent/submatview/local_materializer.go index 6e32b3602..b3d4480bd 100644 --- a/agent/submatview/local_materializer.go +++ b/agent/submatview/local_materializer.go @@ -66,6 +66,10 @@ func (m *LocalMaterializer) Run(ctx context.Context) { if ctx.Err() != nil { return } + if m.isTerminalError(err) { + return + } + m.mat.handleError(req, err) if err := m.mat.retryWaiter.Wait(ctx); err != nil { @@ -74,6 +78,14 @@ func (m *LocalMaterializer) Run(ctx context.Context) { } } +// isTerminalError determines whether the given error cannot be recovered from +// and should cause the materializer to halt and be evicted from the view store. +// +// This roughly matches the logic in agent/proxycfg-glue.newUpdateEvent. +func (m *LocalMaterializer) isTerminalError(err error) bool { + return acl.IsErrNotFound(err) +} + // subscribeOnce opens a new subscription to a local backend and runs // for its lifetime or until the view is closed. func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { diff --git a/agent/submatview/store.go b/agent/submatview/store.go index 242a0d70d..dacf2d8ba 100644 --- a/agent/submatview/store.go +++ b/agent/submatview/store.go @@ -47,6 +47,9 @@ type entry struct { // requests is the count of active requests using this entry. This entry will // remain in the store as long as this count remains > 0. requests int + // evicting is used to mark an entry that will be evicted when the current in- + // flight requests finish. + evicting bool } // NewStore creates and returns a Store that is ready for use. The caller must @@ -89,6 +92,7 @@ func (s *Store) Run(ctx context.Context) { // Only stop the materializer if there are no active requests. 
if e.requests == 0 { + s.logger.Trace("evicting item from store", "key", he.Key()) e.stop() delete(s.byKey, he.Key()) } @@ -187,13 +191,13 @@ func (s *Store) NotifyCallback( "error", err, "request-type", req.Type(), "index", index) - continue } index = result.Index cb(ctx, cache.UpdateEvent{ CorrelationID: correlationID, Result: result.Value, + Err: err, Meta: cache.ResultMeta{Index: result.Index, Hit: result.Cached}, }) } @@ -211,6 +215,9 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) { defer s.lock.Unlock() e, ok := s.byKey[key] if ok { + if e.evicting { + return "", nil, errors.New("item is marked for eviction") + } e.requests++ s.byKey[key] = e return key, e.materializer, nil @@ -222,7 +229,18 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) { } ctx, cancel := context.WithCancel(context.Background()) - go mat.Run(ctx) + go func() { + mat.Run(ctx) + + // Materializers run until they either reach their TTL and are evicted (which + // cancels the given context) or encounter an irrecoverable error. + // + // If the context hasn't been canceled, we know it's the error case so we + // trigger an immediate eviction. + if ctx.Err() == nil { + s.evictNow(key) + } + }() e = entry{ materializer: mat, @@ -233,6 +251,28 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) { return key, e.materializer, nil } +// evictNow causes the item with the given key to be evicted immediately. +// +// If there are requests in-flight, the item is marked for eviction such that +// once the requests have been served releaseEntry will move it to the top of +// the expiry heap. If there are no requests in-flight, evictNow will move the +// item to the top of the expiry heap itself. +// +// In either case, the entry's evicting flag prevents it from being served by +// readEntry (and thereby gaining new in-flight requests). +func (s *Store) evictNow(key string) { + s.lock.Lock() + defer s.lock.Unlock() + + e := s.byKey[key] + e.evicting = true + s.byKey[key] = e + + if e.requests == 0 { + s.expireNowLocked(key) + } +} + // releaseEntry decrements the request count and starts an expiry timer if the // count has reached 0. Must be called once for every call to readEntry. func (s *Store) releaseEntry(key string) { @@ -246,6 +286,11 @@ func (s *Store) releaseEntry(key string) { return } + if e.evicting { + s.expireNowLocked(key) + return + } + if e.expiry.Index() == ttlcache.NotIndexed { e.expiry = s.expiryHeap.Add(key, s.idleTTL) s.byKey[key] = e @@ -255,6 +300,17 @@ func (s *Store) releaseEntry(key string) { s.expiryHeap.Update(e.expiry.Index(), s.idleTTL) } +// expireNowLocked moves the item with the given key to the top of the expiry +// heap, causing it to be picked up by the expiry loop and evicted immediately. +func (s *Store) expireNowLocked(key string) { + e := s.byKey[key] + if idx := e.expiry.Index(); idx != ttlcache.NotIndexed { + s.expiryHeap.Remove(idx) + } + e.expiry = s.expiryHeap.Add(key, time.Duration(0)) + s.byKey[key] = e +} + // makeEntryKey matches agent/cache.makeEntryKey, but may change in the future. 
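For reference, the store key built below is just a slash-joined tuple of type, datacenter, token, and request key. A small sketch of the resulting format (the type name and token value are hypothetical):

```go
package submatview

import (
	"fmt"

	"github.com/hashicorp/consul/agent/cache"
)

// Example_makeEntryKey is illustrative only; "example-type" and "<acl-token>"
// are hypothetical values.
func Example_makeEntryKey() {
	key := makeEntryKey("example-type", cache.RequestInfo{
		Datacenter: "dc1",
		Token:      "<acl-token>",
		Key:        "web",
	})
	fmt.Println(key)
	// Output: example-type/dc1/<acl-token>/web
}
```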
func makeEntryKey(typ string, r cache.RequestInfo) string { return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key) diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index 1d5789c05..aab099599 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -509,3 +509,75 @@ func TestStore_Run_ExpiresEntries(t *testing.T) { require.Len(t, store.byKey, 0) require.Equal(t, ttlcache.NotIndexed, e.expiry.Index()) } + +func TestStore_Run_FailingMaterializer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := NewStore(hclog.NewNullLogger()) + store.idleTTL = 24 * time.Hour + go store.Run(ctx) + + t.Run("with an in-flight request", func(t *testing.T) { + req := &failingMaterializerRequest{ + doneCh: make(chan struct{}), + } + + ch := make(chan cache.UpdateEvent) + reqCtx, reqCancel := context.WithCancel(context.Background()) + t.Cleanup(reqCancel) + require.NoError(t, store.Notify(reqCtx, req, "", ch)) + + assertRequestCount(t, store, req, 1) + + // Cause the materializer to "fail" (exit before its context is canceled). + close(req.doneCh) + + // End the in-flight request. + reqCancel() + + // Check that the item was evicted. + retry.Run(t, func(r *retry.R) { + store.lock.Lock() + defer store.lock.Unlock() + + require.Len(r, store.byKey, 0) + }) + }) + + t.Run("with no in-flight requests", func(t *testing.T) { + req := &failingMaterializerRequest{ + doneCh: make(chan struct{}), + } + + // Cause the materializer to "fail" (exit before its context is canceled). + close(req.doneCh) + + // Check that the item was evicted. + retry.Run(t, func(r *retry.R) { + store.lock.Lock() + defer store.lock.Unlock() + + require.Len(r, store.byKey, 0) + }) + }) +} + +type failingMaterializerRequest struct { + doneCh chan struct{} +} + +func (failingMaterializerRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{} } +func (failingMaterializerRequest) Type() string { return "test.FailingMaterializerRequest" } + +func (r *failingMaterializerRequest) NewMaterializer() (Materializer, error) { + return &failingMaterializer{doneCh: r.doneCh}, nil +} + +type failingMaterializer struct { + doneCh <-chan struct{} +} + +func (failingMaterializer) Query(context.Context, uint64) (Result, error) { return Result{}, nil } + +func (m *failingMaterializer) Run(context.Context) { <-m.doneCh } diff --git a/agent/xds/delta.go b/agent/xds/delta.go index 701c04f2e..71c1edcb0 100644 --- a/agent/xds/delta.go +++ b/agent/xds/delta.go @@ -81,6 +81,11 @@ const ( ) func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discovery_v3.DeltaDiscoveryRequest) error { + // Handle invalid ACL tokens up-front. + if _, err := s.authenticate(stream.Context()); err != nil { + return err + } + // Loop state var ( cfgSnap *proxycfg.ConfigSnapshot @@ -200,7 +205,18 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove } } - case cfgSnap = <-stateCh: + case cs, ok := <-stateCh: + if !ok { + // stateCh is closed either when *we* cancel the watch (on-exit via defer) + // or by the proxycfg.Manager when an irrecoverable error is encountered + // such as the ACL token getting deleted. + // + // We know for sure that this is the latter case, because in the former we + // would've already exited this loop. 
+ return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again") + } + cfgSnap = cs + newRes, err := generator.allResourcesFromSnapshot(cfgSnap) if err != nil { return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err) diff --git a/agent/xds/server.go b/agent/xds/server.go index cc27f3fde..3ee42e77b 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -186,6 +186,18 @@ func (s *Server) Register(srv *grpc.Server) { envoy_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv, s) } +func (s *Server) authenticate(ctx context.Context) (acl.Authorizer, error) { + authz, err := s.ResolveToken(external.TokenFromContext(ctx)) + if acl.IsErrNotFound(err) { + return nil, status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err) + } else if acl.IsErrPermissionDenied(err) { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } else if err != nil { + return nil, status.Errorf(codes.Internal, "error resolving acl token: %v", err) + } + return authz, nil +} + // authorize the xDS request using the token stored in ctx. This authorization is // a bit different from most interfaces. Instead of explicitly authorizing or // filtering each piece of data in the response, the request is authorized @@ -201,13 +213,9 @@ func (s *Server) authorize(ctx context.Context, cfgSnap *proxycfg.ConfigSnapshot return status.Errorf(codes.Unauthenticated, "unauthenticated: no config snapshot") } - authz, err := s.ResolveToken(external.TokenFromContext(ctx)) - if acl.IsErrNotFound(err) { - return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err) - } else if acl.IsErrPermissionDenied(err) { - return status.Error(codes.PermissionDenied, err.Error()) - } else if err != nil { - return status.Errorf(codes.Internal, "error resolving acl token: %v", err) + authz, err := s.authenticate(ctx) + if err != nil { + return err } var authzContext acl.AuthorizerContext From efe7fec43e28d2837eed830ee7d754544d114c91 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Tue, 23 Aug 2022 12:41:44 -0700 Subject: [PATCH 087/104] more reshuffling --- .../docs/{ => integrate}/download-tools.mdx | 0 website/data/docs-nav-data.json | 288 +++++++++--------- 2 files changed, 144 insertions(+), 144 deletions(-) rename website/content/docs/{ => integrate}/download-tools.mdx (100%) diff --git a/website/content/docs/download-tools.mdx b/website/content/docs/integrate/download-tools.mdx similarity index 100% rename from website/content/docs/download-tools.mdx rename to website/content/docs/integrate/download-tools.mdx diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 010d36922..acaf67628 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -58,6 +58,133 @@ } ] }, + { + "title": "Release Notes", + "routes": [ + { + "title": "Overview", + "path": "release-notes" + }, + { + "title": "Consul", + "routes": [ + { + "title": "v1.11.x", + "path": "release-notes/consul/v1_11_x" + }, + { + "title": "v1.10.x", + "path": "release-notes/consul/v1_10_x" + }, + { + "title": "v1.9.x", + "path": "release-notes/consul/v1_9_x" + } + ] + }, + { + "title": "Consul API Gateway", + "routes": [ + { + "title": "v0.4.x", + "path": "release-notes/consul-api-gateway/v0_4_x" + }, + { + "title": "v0.3.x", + "path": "release-notes/consul-api-gateway/v0_3_x" + }, + { + "title": "v0.2.x", + "path": "release-notes/consul-api-gateway/v0_2_x" + }, + { + "title": "v0.1.x", + "path": 
"release-notes/consul-api-gateway/v0_1_x" + } + ] + }, + { + "title": "Consul ECS", + "routes": [ + { + "title": "v0.5.x", + "path": "release-notes/consul-ecs/v0_5_x" + }, + { + "title": "v0.4.x", + "path": "release-notes/consul-ecs/v0_4_x" + }, + { + "title": "v0.3.x", + "path": "release-notes/consul-ecs/v0_3_x" + }, + { + "title": "v0.2.x", + "path": "release-notes/consul-ecs/v0_2_x" + } + ] + }, + { + "title": "Consul Terraform Sync", + "routes": [ + { + "title": "v0.6.x", + "path": "release-notes/consul-terraform-sync/v0_6_x" + }, + { + "title": "v0.5.x", + "path": "release-notes/consul-terraform-sync/v0_5_x" + } + ] + } + ] + }, + { + "title": "Upgrade", + "routes": [ + { + "title": "Overview", + "path": "upgrading" + }, + { + "title": "Compatibility Promise", + "path": "upgrading/compatibility" + }, + { + "title": "Specific Version Details", + "path": "upgrading/upgrade-specific" + }, + { + "title": "Upgrade Instructions", + "routes": [ + { + "title": "Overview", + "path": "upgrading/instructions" + }, + { + "title": "General Process", + "path": "upgrading/instructions/general-process" + }, + { + "title": "Upgrading to Latest 1.2.x", + "path": "upgrading/instructions/upgrade-to-1-2-x" + }, + { + "title": "Upgrading to Latest 1.6.x", + "path": "upgrading/instructions/upgrade-to-1-6-x" + }, + { + "title": "Upgrading to Latest 1.8.x", + "path": "upgrading/instructions/upgrade-to-1-8-x" + }, + { + "title": "Upgrading to Latest 1.10.x", + "path": "upgrading/instructions/upgrade-to-1-10-x" + } + ] + } + ] + }, { "title": "Architecture", "routes": [ @@ -542,6 +669,23 @@ { "title": "Proxy Integration", "href": "/docs/connect/proxies/integrate" + }, + { + "title": "Consul Tools", + "path": "download-tools" + } + ] + }, + { + "title": "Troubleshoot", + "routes": [ + { + "title": "Common Error Messages", + "path": "troubleshoot/common-errors" + }, + { + "title": "FAQ", + "path": "troubleshoot/faq" } ] }, @@ -1176,150 +1320,6 @@ { "divider": true }, - { - "title": "Release Notes", - "routes": [ - { - "title": "Overview", - "path": "release-notes" - }, - { - "title": "Consul", - "routes": [ - { - "title": "v1.11.x", - "path": "release-notes/consul/v1_11_x" - }, - { - "title": "v1.10.x", - "path": "release-notes/consul/v1_10_x" - }, - { - "title": "v1.9.x", - "path": "release-notes/consul/v1_9_x" - } - ] - }, - { - "title": "Consul API Gateway", - "routes": [ - { - "title": "v0.4.x", - "path": "release-notes/consul-api-gateway/v0_4_x" - }, - { - "title": "v0.3.x", - "path": "release-notes/consul-api-gateway/v0_3_x" - }, - { - "title": "v0.2.x", - "path": "release-notes/consul-api-gateway/v0_2_x" - }, - { - "title": "v0.1.x", - "path": "release-notes/consul-api-gateway/v0_1_x" - } - ] - }, - { - "title": "Consul ECS", - "routes": [ - { - "title": "v0.5.x", - "path": "release-notes/consul-ecs/v0_5_x" - }, - { - "title": "v0.4.x", - "path": "release-notes/consul-ecs/v0_4_x" - }, - { - "title": "v0.3.x", - "path": "release-notes/consul-ecs/v0_3_x" - }, - { - "title": "v0.2.x", - "path": "release-notes/consul-ecs/v0_2_x" - } - ] - }, - { - "title": "Consul Terraform Sync", - "routes": [ - { - "title": "v0.6.x", - "path": "release-notes/consul-terraform-sync/v0_6_x" - }, - { - "title": "v0.5.x", - "path": "release-notes/consul-terraform-sync/v0_5_x" - } - ] - } - ] - }, - { - "title": "Upgrade", - "routes": [ - { - "title": "Overview", - "path": "upgrading" - }, - { - "title": "Compatibility Promise", - "path": "upgrading/compatibility" - }, - { - "title": "Specific Version Details", - "path": 
"upgrading/upgrade-specific" - }, - { - "title": "Upgrade Instructions", - "routes": [ - { - "title": "Overview", - "path": "upgrading/instructions" - }, - { - "title": "General Process", - "path": "upgrading/instructions/general-process" - }, - { - "title": "Upgrading to Latest 1.2.x", - "path": "upgrading/instructions/upgrade-to-1-2-x" - }, - { - "title": "Upgrading to Latest 1.6.x", - "path": "upgrading/instructions/upgrade-to-1-6-x" - }, - { - "title": "Upgrading to Latest 1.8.x", - "path": "upgrading/instructions/upgrade-to-1-8-x" - }, - { - "title": "Upgrading to Latest 1.10.x", - "path": "upgrading/instructions/upgrade-to-1-10-x" - } - ] - } - ] - }, - { - "title": "Troubleshoot", - "routes": [ - { - "title": "Common Error Messages", - "path": "troubleshoot/common-errors" - }, - { - "title": "FAQ", - "path": "troubleshoot/faq" - } - ] - }, - { - "title": "Consul Tools", - "path": "download-tools" - }, { "title": "Internals", "hidden": true, From 82c1190cc040c04293ce3d9887eb90311fadcd00 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Tue, 23 Aug 2022 13:00:04 -0700 Subject: [PATCH 088/104] more reshuffling --- website/data/docs-nav-data.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index acaf67628..2b889364d 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -672,7 +672,7 @@ }, { "title": "Consul Tools", - "path": "download-tools" + "path": "integrate/download-tools" } ] }, From a6aa7c30a92031940582d75b39e113fe0bcae6e7 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Tue, 23 Aug 2022 13:04:21 -0700 Subject: [PATCH 089/104] more more reshuffling --- website/data/docs-nav-data.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 2b889364d..3b8f0281f 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -1317,9 +1317,6 @@ } ] }, - { - "divider": true - }, { "title": "Internals", "hidden": true, From 60ed09fa899f5ba5cfc6da6a70d4e26b2df4c99b Mon Sep 17 00:00:00 2001 From: Rosemary Wang <915624+joatmon08@users.noreply.github.com> Date: Tue, 23 Aug 2022 17:52:03 -0400 Subject: [PATCH 090/104] Clarify transparent proxy documentation (#14301) * Clarify transparent proxy documentation Some confusion over known limitations for transparent proxy, specifically over federation versus cluster peering. Updated `KubeDNS` to Kubernetes DNS for consistency with Kubernetes documentation. Co-authored-by: David Yu Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --- .../docs/connect/cluster-peering/k8s.mdx | 9 ++-- .../docs/connect/transparent-proxy.mdx | 44 +++++++++++++------ 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index 7471efed8..35f17959c 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -132,7 +132,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a ## Export services between clusters -1. For the service in "cluster-02" that you want to export, add the following [annotations](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) to your service's pods. +1. 
For the service in "cluster-02" that you want to export, add the following [annotation](/docs/k8s/annotations-and-labels) to your service's pods. @@ -140,7 +140,6 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a ##… annotations: "consul.hashicorp.com/connect-inject": "true" - "consul.hashicorp.com/transparent-proxy": "false" ##… ``` @@ -207,8 +206,6 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a ##… annotations: "consul.hashicorp.com/connect-inject": "true" - "consul.hashicorp.com/transparent-proxy": "false" - "consul.hashicorp.com/connect-service-upstreams": "backend-service.svc.cluster-02.peer:1234" ##… ``` @@ -220,10 +217,10 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a $ kubectl apply --filename frontend-service.yml ``` -1. Run the following command and check the output to confirm that you peered your clusters successfully. +1. Run the following command in `frontend-service` and check the output to confirm that you peered your clusters successfully. ```shell-session - $ curl localhost:1234 + $ kubectl exec -it $(kubectl get pod -l app=frontend -o name) -- curl localhost:1234 { "name": "backend-service", ##… diff --git a/website/content/docs/connect/transparent-proxy.mdx b/website/content/docs/connect/transparent-proxy.mdx index 6e3353bba..57ad48ba7 100644 --- a/website/content/docs/connect/transparent-proxy.mdx +++ b/website/content/docs/connect/transparent-proxy.mdx @@ -31,7 +31,7 @@ With transparent proxy: 1. Local upstreams are inferred from service intentions and peered upstreams are inferred from imported services, so no explicit configuration is needed. -1. Outbound connections pointing to a KubeDNS name "just work" — network rules +1. Outbound connections pointing to a Kubernetes DNS record "just work" — network rules redirect them through the proxy. 1. Inbound traffic is forced to go through the proxy to prevent unauthorized direct access to the application. @@ -160,27 +160,43 @@ configure exceptions on a per-Pod basis. The following Pod annotations allow you - [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-uids) +### Dialing Services Across Kubernetes Clusters + +- You cannot use transparent proxy in a deployment configuration with [federation between Kubernetes clusters](/docs/k8s/installation/multi-cluster/kubernetes). + Instead, services in one Kubernetes cluster must explicitly dial a service to a Consul datacenter in another Kubernetes cluster using the + [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) + annotation. For example, an annotation of + `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` reaches an upstream service called `my-service` + in the datacenter `dc2` on port `1234`. + +- You cannot use transparent proxy in a deployment configuration with a + [single Consul datacenter spanning multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s). Instead, + services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the + [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) + annotation. 
For example, an annotation of + `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, + reaches an upstream service called `my-service` in another Kubernetes cluster and on port `1234`. + Although transparent proxy is enabled, Kubernetes DNS is not utilized when communicating between services that exist on separate Kubernetes clusters. + +- In a deployment configuration with [cluster peering](/docs/connect/cluster-peering), + transparent proxy is fully supported and thus dialing services explicitly is not required. + + ## Known Limitations -* Traffic can only be transparently proxied when the address dialed corresponds to the address of a service in the -transparent proxy's datacenter. Services can also dial explicit upstreams in other datacenters without transparent proxy, for example, by adding an -[annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) such as -`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` to reach an upstream service called `my-service` -in the datacenter `dc2`. -* In the deployment configuration where a [single Consul datacenter spans multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. An example would be -`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, where `my-service` is the service that exists in another Kubernetes cluster and is exposed on port `1234`. Although Transparent Proxy is enabled, KubeDNS is not utilized when communicating between services existing on separate Kubernetes clusters. +- Deployment configurations with federation across or a single datacenter spanning multiple clusters must explicitly dial a + service in another datacenter or cluster using annotations. -* When dialing headless services, the request will be proxied using a plain TCP - proxy. The upstream's protocol is not considered. +- When dialing headless services, the request is proxied using a plain TCP proxy. The upstream's protocol is not considered. ## Using Transparent Proxy In Kubernetes, services can reach other services via their -[KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or via Pod IPs, and that +[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or through Pod IPs, and that traffic will be transparently sent through the proxy. Connect services in Kubernetes are required to have a Kubernetes service selecting the Pods. -~> Note: In order to use KubeDNS, the Kubernetes service name will need to match the Consul service name. This will be the +~> **Note**: In order to use Kubernetes DNS, the Kubernetes service name needs to match the Consul service name. This is the case by default, unless the service Pods have the annotation `consul.hashicorp.com/connect-service` overriding the Consul service name. @@ -192,7 +208,7 @@ inbound and outbound listener on the sidecar proxy. The proxy will be configured appropriate upstream services based on [Service Intentions](/docs/connect/config-entries/service-intentions). This means Connect services no longer need to use the `consul.hashicorp.com/connect-service-upstreams` annotation to configure upstreams explicitly. 
Once the -Service Intentions are set, they can simply address the upstream services using KubeDNS. +Service Intentions are set, they can simply address the upstream services using Kubernetes DNS. As of Consul-k8s >= `0.26.0` and Consul-helm >= `0.32.0`, a Kubernetes service that selects application pods is required for Connect applications, i.e: @@ -213,7 +229,7 @@ spec: In the example above, if another service wants to reach `sample-app` via transparent proxying, it can dial `sample-app.default.svc.cluster.local`, using -[KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). +[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). If ACLs with default "deny" policy are enabled, it also needs a [ServiceIntention](/docs/connect/config-entries/service-intentions) allowing it to talk to `sample-app`. From 4b3a39c04d3db7ebb9003690a06b36921eb31e78 Mon Sep 17 00:00:00 2001 From: twunderlich-grapl <88346193+twunderlich-grapl@users.noreply.github.com> Date: Tue, 23 Aug 2022 20:06:00 -0400 Subject: [PATCH 091/104] Clarify docs around using either Consul or Vault managed PKI paths (#13295) * Clarify docs around using either Consul or Vault managed PKI paths The current docs can be misread to indicate that you need both the Consul and Vault managed PKI Paths policies. The [Learning Tutorial](https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-connect-ca?in=consul/vault-secure#create-vault-policies) is clearer. This tries to make the original docs as clear as the learning tutorial * Clarify that PKI secret engines are used to store certs Co-authored-by: Blake Covarrubias --- website/content/docs/connect/ca/vault.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx index e0a9daa6e..e563a6d83 100644 --- a/website/content/docs/connect/ca/vault.mdx +++ b/website/content/docs/connect/ca/vault.mdx @@ -201,6 +201,8 @@ If the paths already exist, Consul will use them as configured. ## Vault ACL Policies +Vault PKI can be managed by either Consul or by Vault. If you want to manually create and tune the PKI secret engines used to store the root and intermediate certificates, use Vault Managed PKI Paths. If you want to have the PKI automatically managed for you, use Consul Managed PKI Paths. + ### Vault Managed PKI Paths The following Vault policy allows Consul to use pre-existing PKI paths in Vault. From 20c87d235fc349c1c7dd29096b34aecca22c84a3 Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Wed, 24 Aug 2022 12:03:15 +0100 Subject: [PATCH 092/104] dataplane: update envoy bootstrap params for consul-dataplane (#14017) Contains 2 changes to the GetEnvoyBootstrapParams response to support consul-dataplane. Exposing node_name and node_id: consul-dataplane will support providing either the node_id or node_name in its configuration. Unfortunately, supporting both in the xDS meta adds a fair amount of complexity (partly because most tables are currently indexed on node_name) so for now we're going to return them both from the bootstrap params endpoint, allowing consul-dataplane to exchange a node_id for a node_name (which it will supply in the xDS meta). Properly setting service for gateways: To avoid the need to special case gateways in consul-dataplane, service will now either be the destination service name for connect proxies, or the gateway service name. This means it can be used as-is in Envoy configuration (i.e. as a cluster name or in metric tags). 
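As a rough sketch of the intended consumer flow (illustrative only, not part of this
change), a dataplane-style client could exchange the locally configured node ID for the
node name and use the returned `service` directly as its local cluster name. The
generated client and oneof wrapper names used below (`NewDataplaneServiceClient`,
`GetEnvoyBootstrapParamsRequest_NodeId`) are assumed from standard protoc-gen-go output
rather than quoted from this diff.

```go
// Illustrative sketch only; the generated request/client identifier names
// are assumed, not taken verbatim from this patch.
package dataplaneexample

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/proto-public/pbdataplane"
)

func lookupBootstrapParams(ctx context.Context, conn *grpc.ClientConn, nodeID, proxyServiceID string) error {
	client := pbdataplane.NewDataplaneServiceClient(conn)

	resp, err := client.GetEnvoyBootstrapParams(ctx, &pbdataplane.GetEnvoyBootstrapParamsRequest{
		ServiceId: proxyServiceID,
		// Only the node ID is known locally; the response carries both
		// node_id and node_name.
		NodeSpec: &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeId{NodeId: nodeID},
	})
	if err != nil {
		return err
	}

	// node_name can now be supplied in the xDS node metadata even though
	// only node_id was configured.
	fmt.Println("node name:", resp.NodeName)

	// service is usable as-is as the local cluster name / metric tag for
	// both connect proxies and gateways.
	fmt.Println("local cluster:", resp.Service)

	return nil
}
```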
--- agent/consul/state/catalog.go | 3 + agent/consul/state/catalog_test.go | 7 +- .../dataplane/get_envoy_bootstrap_params.go | 10 +- ....go => get_envoy_bootstrap_params_test.go} | 10 +- proto-public/pbdataplane/dataplane.pb.go | 151 ++++++++++-------- proto-public/pbdataplane/dataplane.proto | 7 +- 6 files changed, 118 insertions(+), 70 deletions(-) rename agent/grpc-external/services/dataplane/{get_envoy_boostrap_params_test.go => get_envoy_bootstrap_params_test.go} (96%) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 258519d5b..f9483a313 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -1717,6 +1717,9 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent if err != nil { return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err) } + if service != nil { + service.ID = node.ID + } return idx, service, nil } diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index 10e7af6db..1e096d136 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -270,17 +270,20 @@ func TestStateStore_EnsureRegistration(t *testing.T) { require.Equal(t, uint64(2), idx) require.Equal(t, svcmap["redis1"], r) + exp := svcmap["redis1"].ToServiceNode("node1") + exp.ID = nodeID + // lookup service by node name idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName) require.NoError(t, err) require.Equal(t, uint64(2), idx) - require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + require.Equal(t, exp, sn) // lookup service by node ID idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName) require.NoError(t, err) require.Equal(t, uint64(2), idx) - require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + require.Equal(t, exp, sn) // lookup service by invalid node _, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName) diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go index bed302d12..b320559e9 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go @@ -52,13 +52,21 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G } // Build out the response + var serviceName string + if svc.ServiceKind == structs.ServiceKindConnectProxy { + serviceName = svc.ServiceProxy.DestinationServiceName + } else { + serviceName = svc.ServiceName + } resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{ - Service: svc.ServiceProxy.DestinationServiceName, + Service: serviceName, Partition: svc.EnterpriseMeta.PartitionOrDefault(), Namespace: svc.EnterpriseMeta.NamespaceOrDefault(), Datacenter: s.Datacenter, ServiceKind: convertToResponseServiceKind(svc.ServiceKind), + NodeName: svc.Node, + NodeId: string(svc.ID), } bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config) diff --git a/agent/grpc-external/services/dataplane/get_envoy_boostrap_params_test.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go similarity index 96% rename from agent/grpc-external/services/dataplane/get_envoy_boostrap_params_test.go rename to agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go index c3b4fd146..aa42b0bf1 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_boostrap_params_test.go +++ 
b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go @@ -97,14 +97,20 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) { resp, err := client.GetEnvoyBootstrapParams(ctx, req) require.NoError(t, err) - require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service) + if tc.registerReq.Service.IsGateway() { + require.Equal(t, tc.registerReq.Service.Service, resp.Service) + } else { + require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service) + } + require.Equal(t, serverDC, resp.Datacenter) require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition) require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace) require.Contains(t, resp.Config.Fields, proxyConfigKey) require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey]) require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind) - + require.Equal(t, tc.registerReq.Node, resp.NodeName) + require.Equal(t, string(tc.registerReq.ID), resp.NodeId) } testCases := []testCase{ diff --git a/proto-public/pbdataplane/dataplane.pb.go b/proto-public/pbdataplane/dataplane.pb.go index 1da1eea15..8e8a1000f 100644 --- a/proto-public/pbdataplane/dataplane.pb.go +++ b/proto-public/pbdataplane/dataplane.pb.go @@ -401,12 +401,17 @@ type GetEnvoyBootstrapParamsResponse struct { unknownFields protoimpl.UnknownFields ServiceKind ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=hashicorp.consul.dataplane.ServiceKind" json:"service_kind,omitempty"` - // The destination service name + // service is be used to identify the service (as the local cluster name and + // in metric tags). If the service is a connect proxy it will be the name of + // the proxy's destination service, for gateways it will be the gateway + // service's name. 
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` Partition string `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"` Datacenter string `protobuf:"bytes,5,opt,name=datacenter,proto3" json:"datacenter,omitempty"` Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` + NodeId string `protobuf:"bytes,7,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeName string `protobuf:"bytes,8,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` } func (x *GetEnvoyBootstrapParamsResponse) Reset() { @@ -483,6 +488,20 @@ func (x *GetEnvoyBootstrapParamsResponse) GetConfig() *structpb.Struct { return nil } +func (x *GetEnvoyBootstrapParamsResponse) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *GetEnvoyBootstrapParamsResponse) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ @@ -525,7 +544,7 @@ var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x94, + 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0xca, 0x02, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6b, 0x69, @@ -543,69 +562,73 @@ var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ 0x6e, 0x74, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0xc7, 0x01, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2a, 0xc7, 0x01, 0x0a, 0x11, + 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, + 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, + 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43, + 0x48, 0x5f, 0x53, 0x45, 0x52, 
0x56, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, - 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, - 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, - 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, - 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, - 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, - 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, - 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, - 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, - 0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, - 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, - 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, - 0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, - 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, - 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, - 0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, - 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, - 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x05, 0x32, 0xd2, - 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, - 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, - 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, - 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02, - 0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02, - 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74, - 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, + 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, + 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54, + 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, 0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, + 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, + 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, + 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 
0x4f, + 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45, + 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52, + 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, + 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, + 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, + 0x41, 0x59, 0x10, 0x05, 0x32, 0xd2, 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65, + 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, + 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, + 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f, + 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, + 0x68, 0x69, 0x63, 
0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02, 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto-public/pbdataplane/dataplane.proto b/proto-public/pbdataplane/dataplane.proto index 0502dcd70..cc95f3a51 100644 --- a/proto-public/pbdataplane/dataplane.proto +++ b/proto-public/pbdataplane/dataplane.proto @@ -68,12 +68,17 @@ enum ServiceKind { message GetEnvoyBootstrapParamsResponse { ServiceKind service_kind = 1; - // The destination service name + // service is be used to identify the service (as the local cluster name and + // in metric tags). If the service is a connect proxy it will be the name of + // the proxy's destination service, for gateways it will be the gateway + // service's name. string service = 2; string namespace = 3; string partition = 4; string datacenter = 5; google.protobuf.Struct config = 6; + string node_id = 7; + string node_name = 8; } service DataplaneService { From f3e50ea5ee0a0b3d35fd19d50f20dc7f60555a95 Mon Sep 17 00:00:00 2001 From: Tyler Wendlandt Date: Wed, 24 Aug 2022 06:44:01 -0600 Subject: [PATCH 093/104] ui: Replace file-mask with file-text icon usage on policy list (#14275) --- ui/.gitignore | 1 + ui/packages/consul-ui/app/components/composite-row/index.scss | 2 +- ui/packages/consul-ui/app/styles/base/icons/icons/index.scss | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ui/.gitignore b/ui/.gitignore index 08df27ddb..6bd9a0135 100644 --- a/ui/.gitignore +++ b/ui/.gitignore @@ -12,6 +12,7 @@ node_modules .pnp* .sass-cache .DS_Store +.tool-versions connect.lock coverage coverage_* diff --git a/ui/packages/consul-ui/app/components/composite-row/index.scss b/ui/packages/consul-ui/app/components/composite-row/index.scss index bd66491a7..1dce70e4b 100644 --- a/ui/packages/consul-ui/app/components/composite-row/index.scss +++ b/ui/packages/consul-ui/app/components/composite-row/index.scss @@ -95,7 +95,7 @@ } %composite-row-detail .policy::before { - @extend %with-file-fill-mask, %as-pseudo; + @extend %with-file-text-mask, %as-pseudo; margin-right: 3px; } %composite-row-detail .role::before { diff --git a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss index 9d1a5efe3..20f57edc7 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss @@ -330,7 +330,7 @@ // @import './file-minus/index.scss'; // @import './file-plus/index.scss'; // @import './file-source/index.scss'; -// @import './file-text/index.scss'; +@import './file-text/index.scss'; // @import './file-x/index.scss'; // @import './files/index.scss'; // @import './film/index.scss'; From bb56a3ee502541557116109b8126c24a0ad84b53 Mon Sep 17 00:00:00 2001 From: DanStough Date: Fri, 19 Aug 2022 16:51:11 -0400 Subject: [PATCH 
094/104] doc: tproxy destination fixes --- .../config-entries/terminating-gateway.mdx | 5 +-- .../docs/k8s/connect/terminating-gateways.mdx | 36 +++++++++++-------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/website/content/docs/connect/config-entries/terminating-gateway.mdx b/website/content/docs/connect/config-entries/terminating-gateway.mdx index 3692eff1e..c406c5687 100644 --- a/website/content/docs/connect/config-entries/terminating-gateway.mdx +++ b/website/content/docs/connect/config-entries/terminating-gateway.mdx @@ -153,8 +153,9 @@ spec: Link gateway named "us-west-gateway" with the billing service, and specify a CA file to be used for one-way TLS authentication. --> **Note**: The `CAFile` parameter must be specified _and_ point to a valid CA -bundle in order to properly initiate a TLS connection to the destination service. +-> **Note**: When not using destinations in transparent proxy mode, you must specify the `CAFile` parameter +and point to a valid CA bundle in order to properly initiate a TLS +connection to the destination service. For more information about configuring a gateway for destinations, refer to [Register an External Service as a Destination](/docs/k8s/connect/terminating-gateways#register-an-external-service-as-a-destination). diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 13da908b4..e82bd773f 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -89,13 +89,13 @@ Registering the external services with Consul is a multi-step process: ### Register external services with Consul There are two ways to register an external service with Consul: -1. If [`TransparentProxy`](/docs/k8s/helm#v-connectinject-transparentproxy) is enabled, you can declare external endpoints in the [`Destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `service-defaults`. +1. If [`TransparentProxy`](/docs/connect/transparent-proxy) is enabled, the preferred method is to declare external endpoints in the [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `ServiceDefaults`. 1. You can add the service as a node in the Consul catalog. -#### Register an external service as a Destination +#### Register an external service as a destination -`Destination` fields allow clients to dial the external service directly and are valid only in [`TransparentProxy`](/docs/k8s/helm#v-connectinject-transparentproxy) mode. -The following table describes traffic behaviors when using `Destination`s to route traffic through a terminating gateway: +The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial the external service directly. It is valid only in [`TransparentProxy`](/docs/connect/transparent-proxy)) mode. +The following table describes traffic behaviors when using `destination`s to route traffic through a terminating gateway: | External Services Layer | Client dials | Client uses TLS | Allowed | Notes | |---|---|---|---|---| @@ -109,11 +109,13 @@ The following table describes traffic behaviors when using `Destination`s to rou | L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. 
| You can provide a `caFile` to secure traffic between unencrypted clients that connect to external services through the terminating gateway. -Refer to [Create the configuration entry for the terminating gateway](/docs/k8s/connect/terminating-gateways#create-the-configuration-entry-for-the-terminating-gateway) for details. +Refer to [Create the configuration entry for the terminating gateway](#create-the-configuration-entry-for-the-terminating-gateway) for details. -Create a `service-defaults` custom resource for the external service: +Also note that regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations. - +Create a `ServiceDefaults` custom resource for the external service: + + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 @@ -133,10 +135,10 @@ Create a `service-defaults` custom resource for the external service: Apply the `ServiceDefaults` resource with `kubectl apply`: ```shell-session -$ kubectl apply --filename service-defaults.yaml +$ kubectl apply --filename serviceDefaults.yaml ``` -All other terminating gateway operations can use the name of the `service-defaults` in place of a typical Consul service name. +All other terminating gateway operations can use the name of the `ServiceDefaults` in place of a typical Consul service name. #### Register an external service as a Catalog Node @@ -261,11 +263,13 @@ spec: --> **NOTE**: If TLS is enabled for external services registered through the Consul catalog, you must include the `caFile` parameter that points to the system trust store of the terminating gateway container. +If TLS is enabled for external services registered through the Consul catalog and you are not using [transparent proxy `destination`](#register-an-external-service-as-a-destination), you must include the [`caFile`](/docs/connect/config-entries/terminating-gateway#cafile) parameter that points to the system trust store of the terminating gateway container. By default, the trust store is located in the `/etc/ssl/certs/ca-certificates.crt` directory. -Configure the `caFile` parameter to point to the `/etc/ssl/cert.pem` directory if TLS is enabled and you are using one of the following components: - * Consul Helm chart 0.43 or older - * Or an Envoy image with an alpine base image +Configure the [`caFile`](https://www.consul.io/docs/connect/config-entries/terminating-gateway#cafile) parameter in the `TerminatingGateway` config entry to point to the `/etc/ssl/cert.pem` directory if TLS is enabled and you are using one of the following components: +- Consul Helm chart 0.43 or older +- An Envoy image with an alpine base image + +For `ServiceDefaults` destinations, refer to [Register an external service as a destination](#register-an-external-service-as-a-destination). 
Apply the `TerminatingGateway` resource with `kubectl apply`: @@ -273,7 +277,7 @@ Apply the `TerminatingGateway` resource with `kubectl apply`: $ kubectl apply --filename terminating-gateway.yaml ``` -If using ACLs and TLS, create a [`ServiceIntentions`](/docs/connect/config-entries/service-intentions) resource to allow access from services in the mesh to the external service +If using ACLs and TLS, create a [`ServiceIntentions`](/docs/connect/config-entries/service-intentions) resource to allow access from services in the mesh to the external service: @@ -292,6 +296,8 @@ spec: +-> **NOTE**: [L7 Intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported for `ServiceDefaults` destinations. + Apply the `ServiceIntentions` resource with `kubectl apply`: ```shell-session @@ -372,7 +378,7 @@ $ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https. - + ```shell-session $ kubectl exec deploy/static-client -- curl -vvvs https://example.com/ From 4188769c320010cf30b4e25a6f5ccda0e7739b98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Ruiz=20Garc=C3=ADa?= Date: Wed, 24 Aug 2022 18:31:38 +0200 Subject: [PATCH 095/104] Added new auto_encrypt.grpc_server_tls config option to control AutoTLS enabling of GRPC Server's TLS usage Fix for #14253 Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- agent/config/builder.go | 8 +- agent/config/config.go | 1 + agent/config/runtime_test.go | 103 ++++++++++++++++-- .../TestRuntimeConfig_Sanitize.golden | 11 +- agent/config/testdata/full-config.hcl | 1 + agent/config/testdata/full-config.json | 3 +- agent/grpc-external/server.go | 5 +- tlsutil/config.go | 20 +++- tlsutil/config_test.go | 41 +++++-- .../docs/agent/config/config-files.mdx | 2 + 10 files changed, 162 insertions(+), 33 deletions(-) diff --git a/agent/config/builder.go b/agent/config/builder.go index 40389553d..960d86ea4 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -2531,10 +2531,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza") } - // TLS is only enabled on the gRPC listener if there's an HTTPS port configured - // for historic and backwards-compatibility reasons. - if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) { - b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)") + // And UseAutoCert right now only applies to external gRPC interface. 
+ if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil { + return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza") } defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion) @@ -2591,6 +2590,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error mapCommon("https", t.HTTPS, &c.HTTPS) mapCommon("grpc", t.GRPC, &c.GRPC) + c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false) c.ServerName = rt.ServerName c.NodeName = rt.NodeName diff --git a/agent/config/config.go b/agent/config/config.go index 145c74db7..2d21e75da 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -867,6 +867,7 @@ type TLSProtocolConfig struct { VerifyIncoming *bool `mapstructure:"verify_incoming"` VerifyOutgoing *bool `mapstructure:"verify_outgoing"` VerifyServerHostname *bool `mapstructure:"verify_server_hostname"` + UseAutoCert *bool `mapstructure:"use_auto_cert"` } type TLS struct { diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index e0266811e..f5e9bd335 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -5516,7 +5516,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { }, }) run(t, testCase{ - desc: "tls.grpc without ports.https", + desc: "tls.grpc.use_auto_cert defaults to false", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": { + "grpc": {} + } + } + `}, + hcl: []string{` + tls { + grpc {} + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false + }, + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert defaults to false (II)", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": {} + } + `}, + hcl: []string{` + tls { + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false + }, + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert defaults to false (III)", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + } + `}, + hcl: []string{` + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false + }, + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert enabled when true", args: []string{ `-data-dir=` + dataDir, }, @@ -5524,7 +5587,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { { "tls": { "grpc": { - "cert_file": "cert-1234" + "use_auto_cert": true } } } @@ -5532,20 +5595,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { hcl: []string{` tls { grpc { - cert_file = "cert-1234" + use_auto_cert = true } } `}, expected: func(rt *RuntimeConfig) { rt.DataDir = dataDir - rt.TLS.Domain = "consul." rt.TLS.NodeName = "thehostname" - - rt.TLS.GRPC.CertFile = "cert-1234" + rt.TLS.GRPC.UseAutoCert = true }, - expectedWarnings: []string{ - "tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. 
via ports.https)", + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert disabled when false", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": { + "grpc": { + "use_auto_cert": false + } + } + } + `}, + hcl: []string{` + tls { + grpc { + use_auto_cert = false + } + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false }, }) } @@ -6340,6 +6426,7 @@ func TestLoad_FullConfig(t *testing.T) { TLSMinVersion: types.TLSv1_0, CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA}, VerifyOutgoing: false, + UseAutoCert: true, }, HTTPS: tlsutil.ProtocolConfig{ VerifyIncoming: true, diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 09ecd4cfe..8f91743db 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -374,7 +374,8 @@ "TLSMinVersion": "", "VerifyIncoming": false, "VerifyOutgoing": false, - "VerifyServerHostname": false + "VerifyServerHostname": false, + "UseAutoCert": false }, "HTTPS": { "CAFile": "", @@ -385,7 +386,8 @@ "TLSMinVersion": "", "VerifyIncoming": false, "VerifyOutgoing": false, - "VerifyServerHostname": false + "VerifyServerHostname": false, + "UseAutoCert": false }, "InternalRPC": { "CAFile": "", @@ -396,7 +398,8 @@ "TLSMinVersion": "", "VerifyIncoming": false, "VerifyOutgoing": false, - "VerifyServerHostname": false + "VerifyServerHostname": false, + "UseAutoCert": false }, "NodeName": "", "ServerName": "" @@ -466,4 +469,4 @@ "VersionMetadata": "", "VersionPrerelease": "", "Watches": [] -} \ No newline at end of file +} diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index ed8203296..305df9b89 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -697,6 +697,7 @@ tls { tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" tls_min_version = "TLSv1_0" verify_incoming = true + use_auto_cert = true } } tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 8294a27b7..bc72c2955 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -692,7 +692,8 @@ "key_file": "1y4prKjl", "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "tls_min_version": "TLSv1_0", - "verify_incoming": true + "verify_incoming": true, + "use_auto_cert": true } }, "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", diff --git a/agent/grpc-external/server.go b/agent/grpc-external/server.go index 751cca91c..4ae8c6d65 100644 --- a/agent/grpc-external/server.go +++ b/agent/grpc-external/server.go @@ -1,12 +1,13 @@ package external import ( + "time" + middleware "github.com/grpc-ecosystem/go-grpc-middleware" recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" - "time" agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware" "github.com/hashicorp/consul/tlsutil" @@ -34,7 +35,7 @@ func NewServer(logger 
agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S MinTime: 15 * time.Second, }), } - if tls != nil && tls.GRPCTLSConfigured() { + if tls != nil && tls.GRPCServerUseTLS() { creds := credentials.NewTLS(tls.IncomingGRPCConfig()) opts = append(opts, grpc.Creds(creds)) } diff --git a/tlsutil/config.go b/tlsutil/config.go index 7c9e6d2ad..2e1614165 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -102,6 +102,10 @@ type ProtocolConfig struct { // // Note: this setting only applies to the Internal RPC configuration. VerifyServerHostname bool + + // UseAutoCert is used to enable usage of auto_encrypt/auto_config generated + // certificate & key material on external gRPC listener. + UseAutoCert bool } // Config configures the Configurator. @@ -167,6 +171,10 @@ type protocolConfig struct { // combinedCAPool is a pool containing both manualCAPEMs and the certificates // received from auto-config/auto-encrypt. combinedCAPool *x509.CertPool + + // useAutoCert indicates wether we should use auto-encrypt/config data + // for TLS server/listener. NOTE: Only applies to external GRPC Server. + useAutoCert bool } // Configurator provides tls.Config and net.Dial wrappers to enable TLS for @@ -323,6 +331,7 @@ func (c *Configurator) loadProtocolConfig(base Config, pc ProtocolConfig) (*prot manualCAPEMs: pems, manualCAPool: manualPool, combinedCAPool: combinedPool, + useAutoCert: pc.UseAutoCert, }, nil } @@ -620,16 +629,15 @@ func (c *Configurator) Cert() *tls.Certificate { return cert } -// GRPCTLSConfigured returns whether there's a TLS certificate configured for -// gRPC (either manually or by auto-config/auto-encrypt). It is checked, along -// with the presence of an HTTPS port, to determine whether to enable TLS on -// incoming gRPC connections. +// GRPCServerUseTLS returns whether there's a TLS certificate configured for +// (external) gRPC (either manually or by auto-config/auto-encrypt), and use +// of TLS for gRPC has not been explicitly disabled at auto-encrypt. // // This function acquires a read lock because it reads from the config. 
-func (c *Configurator) GRPCTLSConfigured() bool { +func (c *Configurator) GRPCServerUseTLS() bool { c.lock.RLock() defer c.lock.RUnlock() - return c.grpc.cert != nil || c.autoTLS.cert != nil + return c.grpc.cert != nil || (c.grpc.useAutoCert && c.autoTLS.cert != nil) } // VerifyIncomingRPC returns true if we should verify incoming connnections to diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index 75fa83945..fc817aec6 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -1465,7 +1465,7 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { }) } -func TestConfigurator_GRPCTLSConfigured(t *testing.T) { +func TestConfigurator_GRPCServerUseTLS(t *testing.T) { t.Run("certificate manually configured", func(t *testing.T) { c := makeConfigurator(t, Config{ GRPC: ProtocolConfig{ @@ -1473,22 +1473,47 @@ func TestConfigurator_GRPCTLSConfigured(t *testing.T) { KeyFile: "../test/hostname/Alice.key", }, }) - require.True(t, c.GRPCTLSConfigured()) + require.True(t, c.GRPCServerUseTLS()) }) - t.Run("AutoTLS", func(t *testing.T) { + t.Run("no certificate", func(t *testing.T) { + c := makeConfigurator(t, Config{}) + require.False(t, c.GRPCServerUseTLS()) + }) + + t.Run("AutoTLS (default)", func(t *testing.T) { c := makeConfigurator(t, Config{}) bobCert := loadFile(t, "../test/hostname/Bob.crt") bobKey := loadFile(t, "../test/hostname/Bob.key") require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey)) - - require.True(t, c.GRPCTLSConfigured()) + require.False(t, c.GRPCServerUseTLS()) }) - t.Run("no certificate", func(t *testing.T) { - c := makeConfigurator(t, Config{}) - require.False(t, c.GRPCTLSConfigured()) + t.Run("AutoTLS w/ UseAutoCert Disabled", func(t *testing.T) { + c := makeConfigurator(t, Config{ + GRPC: ProtocolConfig{ + UseAutoCert: false, + }, + }) + + bobCert := loadFile(t, "../test/hostname/Bob.crt") + bobKey := loadFile(t, "../test/hostname/Bob.key") + require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey)) + require.False(t, c.GRPCServerUseTLS()) + }) + + t.Run("AutoTLS w/ UseAutoCert Enabled", func(t *testing.T) { + c := makeConfigurator(t, Config{ + GRPC: ProtocolConfig{ + UseAutoCert: true, + }, + }) + + bobCert := loadFile(t, "../test/hostname/Bob.crt") + bobKey := loadFile(t, "../test/hostname/Bob.key") + require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey)) + require.True(t, c.GRPCServerUseTLS()) }) } diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index bf3e219be..263137873 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -2019,6 +2019,8 @@ specially crafted certificate signed by the CA can be used to gain full access t - `verify_incoming` - ((#tls_grpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming). + - `use_auto_cert` - (Defaults to `false`) Enables or disables TLS on gRPC servers. Set to `true` to allow `auto_encrypt` TLS settings to apply to gRPC listeners. We recommend disabling TLS on gRPC servers if you are using `auto_encrypt` for other TLS purposes, such as enabling HTTPS. + - `https` ((#tls_https)) Provides settings for the HTTPS interface. To enable the HTTPS interface you must define a port via [`ports.https`](#https_port). 
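The patch above gates TLS on the external gRPC listener behind the new `use_auto_cert` setting. As a reading aid, here is a minimal, self-contained Go sketch of the resulting decision logic; `grpcTLSState` and `grpcServerUseTLS` are hypothetical stand-ins for the `Configurator` fields touched in `tlsutil/config.go`, not identifiers from the Consul codebase.

package main

import "fmt"

// grpcTLSState mirrors, in simplified form, the inputs GRPCServerUseTLS consults:
// a manually configured gRPC certificate, an auto_encrypt/auto_config certificate,
// and the tls.grpc.use_auto_cert flag.
type grpcTLSState struct {
	manualCert  bool // tls.grpc.cert_file / key_file were set
	autoCert    bool // a certificate was obtained via auto_encrypt or auto_config
	useAutoCert bool // tls.grpc.use_auto_cert
}

// grpcServerUseTLS restates the predicate from the diff: a manual certificate
// always enables TLS, while an auto-TLS certificate only counts when
// use_auto_cert is true.
func grpcServerUseTLS(s grpcTLSState) bool {
	return s.manualCert || (s.useAutoCert && s.autoCert)
}

func main() {
	fmt.Println(grpcServerUseTLS(grpcTLSState{autoCert: true}))                    // false: an auto cert alone no longer enables TLS
	fmt.Println(grpcServerUseTLS(grpcTLSState{autoCert: true, useAutoCert: true})) // true
	fmt.Println(grpcServerUseTLS(grpcTLSState{manualCert: true}))                  // true
}

The practical effect, as reflected in the updated tests and documentation: with the default `use_auto_cert = false`, an auto_encrypt certificate by itself no longer turns on TLS for the external gRPC listener, and operators who want that behavior must opt in via `tls.grpc.use_auto_cert = true`.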
From c0390284016370b537c111b58286bd07466ef888 Mon Sep 17 00:00:00 2001 From: skpratt Date: Wed, 24 Aug 2022 12:00:09 -0500 Subject: [PATCH 096/104] no-op: refactor usagemetrics tests for clarity and DRY cases (#14313) --- .../usagemetrics/usagemetrics_oss_test.go | 2067 +++++------------ 1 file changed, 560 insertions(+), 1507 deletions(-) diff --git a/agent/consul/usagemetrics/usagemetrics_oss_test.go b/agent/consul/usagemetrics/usagemetrics_oss_test.go index c860e5b74..8c37fe269 100644 --- a/agent/consul/usagemetrics/usagemetrics_oss_test.go +++ b/agent/consul/usagemetrics/usagemetrics_oss_test.go @@ -8,10 +8,11 @@ import ( "time" "github.com/armon/go-metrics" - uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" @@ -23,371 +24,368 @@ func newStateStore() (*state.Store, error) { return state.NewStateStore(nil), nil } +type testCase struct { + modfiyStateStore func(t *testing.T, s *state.Store) + getMembersFunc getMembersFunc + expectedGauges map[string]metrics.GaugeValue +} + +var baseCases = map[string]testCase{ + "empty-state": { + expectedGauges: map[string]metrics.GaugeValue{ + // --- node --- + "consul.usage.test.consul.state.nodes;datacenter=dc1": { + Name: "consul.usage.test.consul.state.nodes", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- peering --- + "consul.usage.test.consul.state.peerings;datacenter=dc1": { + Name: "consul.usage.test.consul.state.peerings", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- member --- + "consul.usage.test.consul.members.clients;datacenter=dc1": { + Name: "consul.usage.test.consul.members.clients", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.members.servers;datacenter=dc1": { + Name: "consul.usage.test.consul.members.servers", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service --- + "consul.usage.test.consul.state.services;datacenter=dc1": { + Name: "consul.usage.test.consul.state.services", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.state.service_instances;datacenter=dc1": { + Name: "consul.usage.test.consul.state.service_instances", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service mesh --- + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-proxy"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { + Name: 
"consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-native"}, + }, + }, + // --- kv --- + "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { + Name: "consul.usage.test.consul.state.kv_entries", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- config entries --- + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-intentions"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-resolver"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-router"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-splitter"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "proxy-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "exported-services"}, + }, + }, + }, + getMembersFunc: func() []serf.Member { return []serf.Member{} }, + }, + 
"nodes": { + modfiyStateStore: func(t *testing.T, s *state.Store) { + require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + }, + getMembersFunc: func() []serf.Member { + return []serf.Member{ + { + Name: "foo", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + { + Name: "bar", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + } + }, + expectedGauges: map[string]metrics.GaugeValue{ + // --- node --- + "consul.usage.test.consul.state.nodes;datacenter=dc1": { + Name: "consul.usage.test.consul.state.nodes", + Value: 2, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- peering --- + "consul.usage.test.consul.state.peerings;datacenter=dc1": { + Name: "consul.usage.test.consul.state.peerings", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- member --- + "consul.usage.test.consul.members.servers;datacenter=dc1": { + Name: "consul.usage.test.consul.members.servers", + Value: 2, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.members.clients;datacenter=dc1": { + Name: "consul.usage.test.consul.members.clients", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service --- + "consul.usage.test.consul.state.services;datacenter=dc1": { + Name: "consul.usage.test.consul.state.services", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.state.service_instances;datacenter=dc1": { + Name: "consul.usage.test.consul.state.service_instances", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service mesh --- + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-proxy"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-native"}, + }, + }, + // --- kv --- + "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { + Name: "consul.usage.test.consul.state.kv_entries", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- config entries --- + 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-intentions"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-resolver"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-router"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-splitter"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "proxy-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "exported-services"}, + }, + }, + }, + }, +} + func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue - } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - 
"consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - 
{Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "nodes": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) - require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "baz", - Tags: map[string]string{"role": "node"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 3, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 1, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", 
Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: 
"service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - }, - } + cases := baseCases for name, tcase := range cases { t.Run(name, func(t *testing.T) { @@ -426,371 +424,57 @@ func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { } func TestUsageReporter_emitPeeringUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue + cases := make(map[string]testCase) + for k, v := range baseCases { + eg := make(map[string]metrics.GaugeValue) + for k, v := range v.expectedGauges { + eg[k] = v + } + cases[k] = testCase{v.modfiyStateStore, v.getMembersFunc, eg} } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: 
"consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "peerings": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - id, err := uuid.GenerateUUID() - require.NoError(t, err) - require.NoError(t, s.PeeringWrite(1, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "foo", ID: id}})) - id, err = uuid.GenerateUUID() - require.NoError(t, err) - require.NoError(t, s.PeeringWrite(2, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "bar", ID: id}})) - id, err = uuid.GenerateUUID() - require.NoError(t, err) - require.NoError(t, s.PeeringWrite(3, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "baz", ID: id}})) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 3, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: 
"consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - }, + peeringsCase := cases["nodes"] + peeringsCase.modfiyStateStore = func(t *testing.T, s *state.Store) { + id, err := uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(1, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "foo", ID: id}})) + id, err = uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(2, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "bar", ID: id}})) + id, err = uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(3, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "baz", ID: id}})) } + peeringsCase.getMembersFunc = func() []serf.Member { + return []serf.Member{ + { + Name: "foo", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + { + Name: "bar", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + } + } + peeringsCase.expectedGauges["consul.usage.test.consul.state.nodes;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.nodes", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + peeringsCase.expectedGauges["consul.usage.test.consul.state.peerings;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.peerings", + Value: 3, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + peeringsCase.expectedGauges["consul.usage.test.consul.members.clients;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.members.clients", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + cases["peerings"] = peeringsCase + delete(cases, "nodes") for name, tcase := range cases { t.Run(name, func(t *testing.T) { @@ -829,420 +513,134 @@ func TestUsageReporter_emitPeeringUsage_OSS(t *testing.T) { } func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue + cases := make(map[string]testCase) + for k, v := range baseCases { + eg := make(map[string]metrics.GaugeValue) + for k, v := range v.expectedGauges { + eg[k] = v + } + cases[k] = testCase{v.modfiyStateStore, v.getMembersFunc, eg} } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- 
service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "nodes-and-services": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) - require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(4, &structs.Node{Node: "qux", Address: "127.0.0.3"})) - mgw := structs.TestNodeServiceMeshGateway(t) - mgw.ID = "mesh-gateway" + nodesAndSvcsCase := cases["nodes"] + nodesAndSvcsCase.modfiyStateStore = func(t *testing.T, s *state.Store) { + require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) + require.NoError(t, s.EnsureNode(4, &structs.Node{Node: "qux", Address: "127.0.0.3"})) - tgw := structs.TestNodeServiceTerminatingGateway(t, "1.1.1.1") - tgw.ID = "terminating-gateway" - // Typical services and some consul services spread across two nodes - require.NoError(t, s.EnsureService(5, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) - require.NoError(t, s.EnsureService(6, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) - require.NoError(t, s.EnsureService(7, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) - require.NoError(t, s.EnsureService(8, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) - require.NoError(t, s.EnsureService(9, "foo", &structs.NodeService{ID: "db-connect-proxy", Service: "db-connect-proxy", Tags: nil, Address: "", Port: 5000, Kind: structs.ServiceKindConnectProxy})) - require.NoError(t, s.EnsureRegistration(10, structs.TestRegisterIngressGateway(t))) - require.NoError(t, s.EnsureService(11, "foo", mgw)) - require.NoError(t, s.EnsureService(12, "foo", tgw)) - require.NoError(t, s.EnsureService(13, "bar", &structs.NodeService{ID: 
"db-native", Service: "db", Tags: nil, Address: "", Port: 5000, Connect: structs.ServiceConnect{Native: true}})) - require.NoError(t, s.EnsureConfigEntry(14, &structs.IngressGatewayConfigEntry{ - Kind: structs.IngressGateway, - Name: "foo", - })) - require.NoError(t, s.EnsureConfigEntry(15, &structs.IngressGatewayConfigEntry{ - Kind: structs.IngressGateway, - Name: "bar", - })) - require.NoError(t, s.EnsureConfigEntry(16, &structs.IngressGatewayConfigEntry{ - Kind: structs.IngressGateway, - Name: "baz", - })) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "baz", - Tags: map[string]string{"role": "node", "segment": "a"}, - Status: serf.StatusAlive, - }, - { - Name: "qux", - Tags: map[string]string{"role": "node", "segment": "b"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 4, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 2, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 7, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 9, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - 
"consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 3, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, + mgw := structs.TestNodeServiceMeshGateway(t) + mgw.ID = "mesh-gateway" + + tgw := structs.TestNodeServiceTerminatingGateway(t, "1.1.1.1") + tgw.ID = "terminating-gateway" + // Typical services and some consul services spread across two nodes + 
require.NoError(t, s.EnsureService(5, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) + require.NoError(t, s.EnsureService(6, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) + require.NoError(t, s.EnsureService(7, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + require.NoError(t, s.EnsureService(8, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + require.NoError(t, s.EnsureService(9, "foo", &structs.NodeService{ID: "db-connect-proxy", Service: "db-connect-proxy", Tags: nil, Address: "", Port: 5000, Kind: structs.ServiceKindConnectProxy})) + require.NoError(t, s.EnsureRegistration(10, structs.TestRegisterIngressGateway(t))) + require.NoError(t, s.EnsureService(11, "foo", mgw)) + require.NoError(t, s.EnsureService(12, "foo", tgw)) + require.NoError(t, s.EnsureService(13, "bar", &structs.NodeService{ID: "db-native", Service: "db", Tags: nil, Address: "", Port: 5000, Connect: structs.ServiceConnect{Native: true}})) + require.NoError(t, s.EnsureConfigEntry(14, &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "foo", + })) + require.NoError(t, s.EnsureConfigEntry(15, &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "bar", + })) + require.NoError(t, s.EnsureConfigEntry(16, &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "baz", + })) + } + baseCaseMembers := nodesAndSvcsCase.getMembersFunc() + nodesAndSvcsCase.getMembersFunc = func() []serf.Member { + baseCaseMembers = append(baseCaseMembers, serf.Member{ + Name: "baz", + Tags: map[string]string{"role": "node", "segment": "a"}, + Status: serf.StatusAlive, + }) + baseCaseMembers = append(baseCaseMembers, serf.Member{ + Name: "qux", + Tags: map[string]string{"role": "node", "segment": "b"}, + Status: serf.StatusAlive, + }) + return baseCaseMembers + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.nodes;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.nodes", + Value: 4, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.members.clients;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.members.clients", + Value: 2, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.services;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.services", + Value: 7, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.service_instances;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.service_instances", + Value: 9, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-proxy"}, }, } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: 
"kind", Value: "terminating-gateway"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh-gateway"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-native"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.config_entries", + Value: 3, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + } + cases["nodes-and-services"] = nodesAndSvcsCase + delete(cases, "nodes") for name, tcase := range cases { t.Run(name, func(t *testing.T) { @@ -1280,379 +678,34 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { } func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue + cases := make(map[string]testCase) + for k, v := range baseCases { + eg := make(map[string]metrics.GaugeValue) + for k, v := range v.expectedGauges { + eg[k] = v + } + cases[k] = testCase{v.modfiyStateStore, v.getMembersFunc, eg} } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: 
"consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "nodes": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) - require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) - require.NoError(t, s.KVSSet(4, &structs.DirEntry{Key: "a", Value: []byte{1}})) - require.NoError(t, s.KVSSet(5, &structs.DirEntry{Key: "b", Value: []byte{1}})) - require.NoError(t, s.KVSSet(6, &structs.DirEntry{Key: "c", Value: []byte{1}})) - require.NoError(t, s.KVSSet(7, &structs.DirEntry{Key: "d", Value: []byte{1}})) - require.NoError(t, s.KVSDelete(8, "d", &acl.EnterpriseMeta{})) - require.NoError(t, s.KVSDelete(9, "c", &acl.EnterpriseMeta{})) - require.NoError(t, s.KVSSet(10, &structs.DirEntry{Key: "e", Value: []byte{1}})) - require.NoError(t, s.KVSSet(11, &structs.DirEntry{Key: "f", Value: []byte{1}})) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "baz", - Tags: map[string]string{"role": "node"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 3, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 1, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- 
service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 4, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - 
{Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - }, + nodesCase := cases["nodes"] + mss := nodesCase.modfiyStateStore + nodesCase.modfiyStateStore = func(t *testing.T, s *state.Store) { + mss(t, s) + require.NoError(t, s.KVSSet(4, &structs.DirEntry{Key: "a", Value: []byte{1}})) + require.NoError(t, s.KVSSet(5, &structs.DirEntry{Key: "b", Value: []byte{1}})) + require.NoError(t, s.KVSSet(6, &structs.DirEntry{Key: "c", Value: []byte{1}})) + require.NoError(t, s.KVSSet(7, &structs.DirEntry{Key: "d", Value: []byte{1}})) + require.NoError(t, s.KVSDelete(8, "d", &acl.EnterpriseMeta{})) + require.NoError(t, s.KVSDelete(9, "c", &acl.EnterpriseMeta{})) + require.NoError(t, s.KVSSet(10, &structs.DirEntry{Key: "e", Value: []byte{1}})) + require.NoError(t, s.KVSSet(11, &structs.DirEntry{Key: "f", Value: []byte{1}})) } + nodesCase.expectedGauges["consul.usage.test.consul.state.kv_entries;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.kv_entries", + Value: 4, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + cases["nodes"] = nodesCase for name, tcase := range cases { t.Run(name, func(t *testing.T) { From 96d30050457b2fb46c91e48d2aa14aa7f2915102 Mon Sep 17 00:00:00 2001 From: Derek Menteer Date: Wed, 24 Aug 2022 12:39:15 -0500 Subject: [PATCH 097/104] Add 14269 changelog entry. --- .changelog/14269.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/14269.txt diff --git a/.changelog/14269.txt b/.changelog/14269.txt new file mode 100644 index 000000000..29eec6d5d --- /dev/null +++ b/.changelog/14269.txt @@ -0,0 +1,3 @@ +```release-note:bugfix +connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections. 
+``` \ No newline at end of file From 7ee1c857c3d3e2ffe6eddf5b4c5300cd886a7b65 Mon Sep 17 00:00:00 2001 From: cskh Date: Wed, 24 Aug 2022 14:13:10 -0400 Subject: [PATCH 098/104] =?UTF-8?q?Fix:=20the=20inboundconnection=20limit?= =?UTF-8?q?=20filter=20should=20be=20placed=20in=20front=20of=20http=20co?= =?UTF-8?q?=E2=80=A6=20(#14325)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: the inboundconnection limit should be placed in front of http connection manager Co-authored-by: Freddy --- agent/xds/listeners.go | 44 ++++++++++++------- ...ener-max-inbound-connections.latest.golden | 15 ++++--- 2 files changed, 36 insertions(+), 23 deletions(-) diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 95b84c94c..33c339c4d 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -1214,16 +1214,38 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot filterOpts.forwardClientPolicy = envoy_http_v3.HttpConnectionManager_APPEND_FORWARD } } + + // If an inbound connect limit is set, inject a connection limit filter on each chain. + if cfg.MaxInboundConnections > 0 { + connectionLimitFilter, err := makeConnectionLimitFilter(cfg.MaxInboundConnections) + if err != nil { + return nil, err + } + l.FilterChains = []*envoy_listener_v3.FilterChain{ + { + Filters: []*envoy_listener_v3.Filter{ + connectionLimitFilter, + }, + }, + } + } + filter, err := makeListenerFilter(filterOpts) if err != nil { return nil, err } - l.FilterChains = []*envoy_listener_v3.FilterChain{ - { - Filters: []*envoy_listener_v3.Filter{ - filter, + + if len(l.FilterChains) > 0 { + // The list of FilterChains has already been initialized + l.FilterChains[0].Filters = append(l.FilterChains[0].Filters, filter) + } else { + l.FilterChains = []*envoy_listener_v3.FilterChain{ + { + Filters: []*envoy_listener_v3.Filter{ + filter, + }, }, - }, + } } err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter) @@ -1249,17 +1271,6 @@ func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v return nil } - // If an inbound connect limit is set, inject a connection limit filter on each chain. 
- if proxyCfg.MaxInboundConnections > 0 { - filter, err := makeConnectionLimitFilter(proxyCfg.MaxInboundConnections) - if err != nil { - return nil - } - for idx := range l.FilterChains { - l.FilterChains[idx].Filters = append(l.FilterChains[idx].Filters, filter) - } - } - return nil } @@ -1990,6 +2001,7 @@ func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener func makeConnectionLimitFilter(limit int) (*envoy_listener_v3.Filter, error) { cfg := &envoy_connection_limit_v3.ConnectionLimit{ + StatPrefix: "inbound_connection_limit", MaxConnections: wrapperspb.UInt64(uint64(limit)), } return makeFilter("envoy.filters.network.connection_limit", cfg) diff --git a/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden b/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden index be3b83433..cbfda69f5 100644 --- a/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden +++ b/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden @@ -73,6 +73,14 @@ "statPrefix": "connect_authz" } }, + { + "name": "envoy.filters.network.connection_limit", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit", + "statPrefix": "inbound_connection_limit", + "maxConnections": "222" + } + }, { "name": "envoy.filters.network.tcp_proxy", "typedConfig": { @@ -80,13 +88,6 @@ "statPrefix": "public_listener", "cluster": "local_app" } - }, - { - "name": "envoy.filters.network.connection_limit", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit", - "maxConnections": "222" - } } ], "transportSocket": { From e14ab541311ecb600867401dec34206e98b6f61d Mon Sep 17 00:00:00 2001 From: Evan Culver Date: Wed, 24 Aug 2022 17:04:26 -0700 Subject: [PATCH 099/104] docs: Update Envoy support matrix to match the code (#14338) --- website/content/docs/connect/proxies/envoy.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index 526d642bc..7ada5b6fd 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -37,8 +37,8 @@ Consul supports **four major Envoy releases** at the beginning of each major Con | Consul Version | Compatible Envoy Versions | | ------------------- | -----------------------------------------------------------------------------------| | 1.13.x | 1.23.0, 1.22.2, 1.21.4, 1.20.6 | -| 1.12.x | 1.22.2, 1.21.3, 1.20.4, 1.19.5 | -| 1.11.x | 1.20.2, 1.19.3, 1.18.6, 1.17.41 | +| 1.12.x | 1.22.2, 1.21.4, 1.20.6, 1.19.5 | +| 1.11.x | 1.20.6, 1.19.5, 1.18.6, 1.17.41 | 1. Envoy 1.20.1 and earlier are vulnerable to [CVE-2022-21654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21654) and [CVE-2022-21655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21655). Both CVEs were patched in Envoy versions 1.18.6, 1.19.3, and 1.20.2. Envoy 1.16.x and older releases are no longer supported (see [HCSEC-2022-07](https://discuss.hashicorp.com/t/hcsec-2022-07-consul-s-connect-service-mesh-affected-by-recent-envoy-security-releases/36332)). Consul 1.9.x clusters should be upgraded to 1.10.x and Envoy upgraded to the latest supported Envoy version for that release, 1.18.6. From 2e75833133ed5bfa7f7ce22e436d1fb901b0e935 Mon Sep 17 00:00:00 2001 From: "Chris S. 
Kim" Date: Thu, 25 Aug 2022 11:25:59 -0400 Subject: [PATCH 100/104] Exit loop when context is cancelled --- agent/consul/leader_peering.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index bc5b669cd..d1823b026 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -391,6 +391,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error { // process. This includes deletion of the peerings themselves in addition to any peering data raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate)) for { + select { + case <-ctx.Done(): + return nil + default: + } + ws := memdb.NewWatchSet() state := s.fsm.State() _, peerings, err := s.fsm.State().PeeringListDeleted(ws) From 99df4df0571a6fbb78da04199ea981f6fbbe8ae9 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 27 Jul 2022 14:03:06 -0700 Subject: [PATCH 101/104] docs: improve health check related docs Includes: - Improved scannability and organization of checks overview - Checks overview includes more guidance on - How to register a health check - The options available for a health check definition - Contextual cross-references to maintenance mode --- website/content/api-docs/agent/check.mdx | 13 +- website/content/api-docs/health.mdx | 3 + website/content/docs/discovery/checks.mdx | 595 +++++++++++++--------- 3 files changed, 365 insertions(+), 246 deletions(-) diff --git a/website/content/api-docs/agent/check.mdx b/website/content/api-docs/agent/check.mdx index eafbb17c4..785fbce8b 100644 --- a/website/content/api-docs/agent/check.mdx +++ b/website/content/api-docs/agent/check.mdx @@ -6,7 +6,10 @@ description: The /agent/check endpoints interact with checks on the local agent # Check - Agent HTTP API -The `/agent/check` endpoints interact with checks on the local agent in Consul. +Consul's health check capabilities are described in the +[health checks overview](/docs/discovery/checks). +The `/agent/check` endpoints interact with health checks +managed by the local agent in Consul. These should not be confused with checks in the catalog. ## List Checks @@ -418,6 +421,10 @@ $ curl \ This endpoint is used with a TTL type check to set the status of the check to `critical` and to reset the TTL clock. +If you want to manually mark a service as unhealthy, +use [maintenance mode](/api-docs/agent#enable-maintenance-mode) +instead of defining a TTL health check and using this endpoint. + | Method | Path | Produces | | ------ | ----------------------------- | ------------------ | | `PUT` | `/agent/check/fail/:check_id` | `application/json` | @@ -456,6 +463,10 @@ $ curl \ This endpoint is used with a TTL type check to set the status of the check and to reset the TTL clock. +If you want to manually mark a service as unhealthy, +use [maintenance mode](/api-docs/agent#enable-maintenance-mode) +instead of defining a TTL health check and using this endpoint. + | Method | Path | Produces | | ------ | ------------------------------- | ------------------ | | `PUT` | `/agent/check/update/:check_id` | `application/json` | diff --git a/website/content/api-docs/health.mdx b/website/content/api-docs/health.mdx index 898c8ffe4..cad74bbad 100644 --- a/website/content/api-docs/health.mdx +++ b/website/content/api-docs/health.mdx @@ -14,6 +14,9 @@ optional health checking mechanisms. Additionally, some of the query results from the health endpoints are filtered while the catalog endpoints provide the raw entries. 
+To modify health check registration or information, +use the [`/agent/check`](/api-docs/agent/check) endpoints. + ## List Checks for Node This endpoint returns the checks specific to the node provided on the path. diff --git a/website/content/docs/discovery/checks.mdx b/website/content/docs/discovery/checks.mdx index 5a2149579..1b4c4faf4 100644 --- a/website/content/docs/discovery/checks.mdx +++ b/website/content/docs/discovery/checks.mdx @@ -13,144 +13,72 @@ description: >- One of the primary roles of the agent is management of system-level and application-level health checks. A health check is considered to be application-level if it is associated with a service. If not associated with a service, the check monitors the health of the entire node. -Review the [health checks tutorial](https://learn.hashicorp.com/tutorials/consul/service-registration-health-checks) to get a more complete example on how to leverage health check capabilities in Consul. -A check is defined in a configuration file or added at runtime over the HTTP interface. Checks -created via the HTTP interface persist with that node. +Review the [service health checks tutorial](https://learn.hashicorp.com/tutorials/consul/service-registration-health-checks) +to get a more complete example on how to leverage health check capabilities in Consul. -There are several different kinds of checks: +## Registering a health check -- Script + Interval - These checks depend on invoking an external application - that performs the health check, exits with an appropriate exit code, and potentially - generates some output. A script is paired with an invocation interval (e.g. - every 30 seconds). This is similar to the Nagios plugin system. The output of - a script check is limited to 4KB. Output larger than this will be truncated. - By default, Script checks will be configured with a timeout equal to 30 seconds. - It is possible to configure a custom Script check timeout value by specifying the - `timeout` field in the check definition. When the timeout is reached on Windows, - Consul will wait for any child processes spawned by the script to finish. For any - other system, Consul will attempt to force-kill the script and any child processes - it has spawned once the timeout has passed. - In Consul 0.9.0 and later, script checks are not enabled by default. To use them you - can either use : +There are three ways to register a service with health checks: - - [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks): - enable script checks defined in local config files. Script checks defined via the HTTP - API will not be allowed. - - [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks): enable - script checks regardless of how they are defined. +1. Start or reload a Consul agent with a service definition file in the + [agent's configuration directory](/docs/agent#configuring-consul-agents). +1. Call the + [`/agent/service/register`](/api-docs/agent/service#register-service) + HTTP API endpoint to register the service. +1. Use the + [`consul services register`](/commands/services/register) + CLI command to register the service. - ~> **Security Warning:** Enabling script checks in some configurations may - introduce a remote execution vulnerability which is known to be targeted by - malware. We strongly recommend `enable_local_script_checks` instead. See [this - blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) - for more details. 
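
The registration flow described above can also be driven programmatically through the `/agent/service/register` endpoint; a minimal sketch using the official Go API client against a local agent is shown below. The `web` service name, port `80`, and `/health` path are placeholders for this example.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Register a service and an HTTP health check in one call; this is the
	// programmatic equivalent of the /agent/service/register endpoint.
	reg := &api.AgentServiceRegistration{
		Name: "web",
		Port: 80,
		Check: &api.AgentServiceCheck{
			HTTP:     "http://localhost:80/health",
			Interval: "10s",
			Timeout:  "1s",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}
```
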
+When a service is registered using the HTTP API endpoint or CLI command, +the checks persist in the Consul data folder across Consul agent restarts. -- `HTTP + Interval` - These checks make an HTTP `GET` request to the specified URL, - waiting the specified `interval` amount of time between requests (eg. 30 seconds). - The status of the service depends on the HTTP response code: any `2xx` code is - considered passing, a `429 Too ManyRequests` is a warning, and anything else is - a failure. This type of check - should be preferred over a script that uses `curl` or another external process - to check a simple HTTP operation. By default, HTTP checks are `GET` requests - unless the `method` field specifies a different method. Additional header - fields can be set through the `header` field which is a map of lists of - strings, e.g. `{"x-foo": ["bar", "baz"]}`. By default, HTTP checks will be - configured with a request timeout equal to 10 seconds. +## Types of checks - It is possible to configure a custom HTTP check timeout value by - specifying the `timeout` field in the check definition. The output of the - check is limited to roughly 4KB. Responses larger than this will be truncated. - HTTP checks also support TLS. By default, a valid TLS certificate is expected. - Certificate verification can be turned off by setting the `tls_skip_verify` - field to `true` in the check definition. When using TLS, the SNI will be set - automatically from the URL if it uses a hostname (as opposed to an IP address); - the value can be overridden by setting `tls_server_name`. +This section describes the available types of health checks you can use to +automatically monitor the health of a service instance or node. - Consul follows HTTP redirects by default. Set the `disable_redirects` field to - `true` to disable redirects. +-> **To manually mark a service unhealthy:** Use the maintenance mode + [CLI command](/commands/maint) or + [HTTP API endpoint](/api-docs/agent#enable-maintenance-mode) + to temporarily remove one or all service instances on a node + from service discovery DNS and HTTP API query results. -- `TCP + Interval` - These checks make a TCP connection attempt to the specified - IP/hostname and port, waiting `interval` amount of time between attempts - (e.g. 30 seconds). If no hostname - is specified, it defaults to "localhost". The status of the service depends on - whether the connection attempt is successful (ie - the port is currently - accepting connections). If the connection is accepted, the status is - `success`, otherwise the status is `critical`. In the case of a hostname that - resolves to both IPv4 and IPv6 addresses, an attempt will be made to both - addresses, and the first successful connection attempt will result in a - successful check. This type of check should be preferred over a script that - uses `netcat` or another external process to check a simple socket operation. - By default, TCP checks will be configured with a request timeout of 10 seconds. - It is possible to configure a custom TCP check timeout value by specifying the - `timeout` field in the check definition. +### Script check ((#script-interval)) -- `UDP + Interval` - These checks direct the client to periodically send UDP datagrams - to the specified IP/hostname and port. The duration specified in the `interval` field sets the amount of time - between attempts, such as `30s` to indicate 30 seconds. The check is logged as healthy if any response from the UDP server is received. 
Any other result sets the status to `critical`. - The default interval for, UDP checks is `10s`, but you can configure a custom UDP check timeout value by specifying the - `timeout` field in the check definition. If any timeout on read exists, the check is still considered healthy. +Script checks periodically invoke an external application that performs the health check, +exits with an appropriate exit code, and potentially generates some output. +The specified `interval` determines the time between check invocations. +The output of a script check is limited to 4KB. +Larger outputs are truncated. -- `Time to Live (TTL)` ((#ttl)) - These checks retain their last known state - for a given TTL. The state of the check must be updated periodically over the HTTP - interface. If an external system fails to update the status within a given TTL, - the check is set to the failed state. This mechanism, conceptually similar to a - dead man's switch, relies on the application to directly report its health. For - example, a healthy app can periodically `PUT` a status update to the HTTP endpoint; - if the app fails, the TTL will expire and the health check enters a critical state. - The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass), - [warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail), - and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their - last known status to disk. This allows the Consul agent to restore the last known - status of the check across restarts. Persisted check status is valid through the - end of the TTL from the time of the last check. +By default, script checks are configured with a timeout equal to 30 seconds. +To configure a custom script check timeout value, +specify the `timeout` field in the check definition. +After reaching the timeout on a Windows system, +Consul waits for any child processes spawned by the script to finish. +After reaching the timeout on other systems, +Consul attempts to force-kill the script and any child processes it spawned. -- `Docker + Interval` - These checks depend on invoking an external application which - is packaged within a Docker Container. The application is triggered within the running - container via the Docker Exec API. We expect that the Consul agent user has access - to either the Docker HTTP API or the unix socket. Consul uses `$DOCKER_HOST` to - determine the Docker API endpoint. The application is expected to run, perform a health - check of the service running inside the container, and exit with an appropriate exit code. - The check should be paired with an invocation interval. The shell on which the check - has to be performed is configurable which makes it possible to run containers which - have different shells on the same host. Check output for Docker is limited to - 4KB. Any output larger than this will be truncated. In Consul 0.9.0 and later, the agent - must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) - set to `true` in order to enable Docker health checks. +Script checks are not enabled by default. +To enable a Consul agent to perform script checks, +use one of the following agent configuration options: -- `gRPC + Interval` - These checks are intended for applications that support the standard - [gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
- The state of the check will be updated by probing the configured endpoint, waiting `interval` - amount of time between probes (eg. 30 seconds). By default, gRPC checks will be configured - with a default timeout of 10 seconds. - It is possible to configure a custom timeout value by specifying the `timeout` field in - the check definition. gRPC checks will default to not using TLS, but TLS can be enabled by - setting `grpc_use_tls` in the check definition. If TLS is enabled, then by default, a valid - TLS certificate is expected. Certificate verification can be turned off by setting the - `tls_skip_verify` field to `true` in the check definition. - To check on a specific service instead of the whole gRPC server, add the service identifier after the `gRPC` check's endpoint in the following format `/:service_identifier`. +- [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks): + Enable script checks defined in local config files. + Script checks registered using the HTTP API are not allowed. +- [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks): + Enable script checks no matter how they are registered. -- `H2ping + Interval` - These checks test an endpoint that uses http2 - by connecting to the endpoint and sending a ping frame. TLS is assumed to be configured by default. - To disable TLS and use h2c, set `h2ping_use_tls` to `false`. If the ping is successful - within a specified timeout, then the check is updated as passing. - The timeout defaults to 10 seconds, but is configurable using the `timeout` field. If TLS is enabled a valid - certificate is required, unless `tls_skip_verify` is set to `true`. - The check will be run on the interval specified by the `interval` field. + ~> **Security Warning:** + Enabling non-local script checks in some configurations may introduce + a remote execution vulnerability known to be targeted by malware. + We strongly recommend `enable_local_script_checks` instead. + For more information, refer to + [this blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations). -- `Alias` - These checks alias the health state of another registered - node or service. The state of the check will be updated asynchronously, but is - nearly instant. For aliased services on the same agent, the local state is monitored - and no additional network resources are consumed. For other services and nodes, - the check maintains a blocking query over the agent's connection with a current - server and allows stale requests. If there are any errors in watching the aliased - node or service, the check state will be critical. For the blocking query, the - check will use the ACL token set on the service or check definition or otherwise - will fall back to the default ACL token set with the agent (`acl_token`). 
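
For the gRPC check described in the list above, the target application must expose the standard gRPC health checking protocol. A minimal Go sketch of a server doing so is shown below; the `google.golang.org/grpc` health packages, the placeholder port `9090`, and the `my_service` name (used elsewhere in this document) are assumptions for the example.

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":9090")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()

	// Serve the standard gRPC health-checking service that a Consul gRPC
	// check probes; report one named service as SERVING.
	hs := health.NewServer()
	hs.SetServingStatus("my_service", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(srv, hs)

	log.Fatal(srv.Serve(lis))
}
```

A Consul gRPC check pointed at `localhost:9090/my_service` would then report the status that this health server advertises for that service identifier.
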
- -## Check Definition - -A script check: +The following service definition file snippet is an example +of a script check definition: @@ -162,7 +90,6 @@ check = { interval = "10s" timeout = "1s" } - ``` ```json @@ -179,7 +106,47 @@ check = { -A HTTP check: +#### Check script conventions + +A check script's exit code is used to determine the health check status: + +- Exit code 0 - Check is passing +- Exit code 1 - Check is warning +- Any other code - Check is failing + +Any output of the script is captured and made available in the +`Output` field of checks included in HTTP API responses, +as in this example from the [local service health endpoint](/api-docs/agent/service#by-name-json). + +### HTTP check ((#http-interval)) + +HTTP checks periodically make an HTTP `GET` request to the specified URL, +waiting the specified `interval` amount of time between requests. +The status of the service depends on the HTTP response code: any `2xx` code is +considered passing, a `429 Too ManyRequests` is a warning, and anything else is +a failure. This type of check +should be preferred over a script that uses `curl` or another external process +to check a simple HTTP operation. By default, HTTP checks are `GET` requests +unless the `method` field specifies a different method. Additional request +headers can be set through the `header` field which is a map of lists of +strings, such as `{"x-foo": ["bar", "baz"]}`. + +By default, HTTP checks are configured with a request timeout equal to 10 seconds. +To configure a custom HTTP check timeout value, +specify the `timeout` field in the check definition. +The output of an HTTP check is limited to approximately 4KB. +Larger outputs are truncated. +HTTP checks also support TLS. By default, a valid TLS certificate is expected. +Certificate verification can be turned off by setting the `tls_skip_verify` +field to `true` in the check definition. When using TLS, the SNI is implicitly +determined from the URL if it uses a hostname instead of an IP address. +You can explicitly set the SNI value by setting `tls_server_name`. + +Consul follows HTTP redirects by default. +To disable redirects, set the `disable_redirects` field to `true`. + +The following service definition file snippet is an example +of an HTTP check definition: @@ -220,7 +187,23 @@ check = { -A TCP check: +### TCP check ((#tcp-interval)) + +TCP checks periodically make a TCP connection attempt to the specified IP/hostname and port, waiting `interval` amount of time between attempts. +If no hostname is specified, it defaults to "localhost". +The health check status is `success` if the target host accepts the connection attempt, +otherwise the status is `critical`. In the case of a hostname that +resolves to both IPv4 and IPv6 addresses, an attempt is made to both +addresses, and the first successful connection attempt results in a +successful check. This type of check should be preferred over a script that +uses `netcat` or another external process to check a simple socket operation. + +By default, TCP checks are configured with a request timeout equal to 10 seconds. +To configure a custom TCP check timeout value, +specify the `timeout` field in the check definition. 
+ +The following service definition file snippet is an example +of a TCP check definition: @@ -232,7 +215,6 @@ check = { interval = "10s" timeout = "1s" } - ``` ```json @@ -249,7 +231,21 @@ check = { -A UDP check: +### UDP check ((#udp-interval)) + +UDP checks periodically direct the Consul agent to send UDP datagrams +to the specified IP/hostname and port, +waiting `interval` amount of time between attempts. +The check status is set to `success` if any response is received from the targeted UDP server. +Any other result sets the status to `critical`. + +By default, UDP checks are configured with a request timeout equal to 10 seconds. +To configure a custom UDP check timeout value, +specify the `timeout` field in the check definition. +If any timeout on read exists, the check is still considered healthy. + +The following service definition file snippet is an example +of a UDP check definition: @@ -261,7 +257,6 @@ check = { interval = "10s" timeout = "1s" } - ``` ```json @@ -278,7 +273,32 @@ check = { -A TTL check: +### Time to live (TTL) check ((#ttl)) + +TTL checks retain their last known state for the specified `ttl` duration. +If the `ttl` duration elapses before a new check update +is provided over the HTTP interface, +the check is set to `critical` state. + +This mechanism relies on the application to directly report its health. +For example, a healthy app can periodically `PUT` a status update to the HTTP endpoint. +Then, if the app is disrupted and unable to perform this update +before the TTL expires, the health check enters the `critical` state. +The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass), +[warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail), +and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their +last known status to disk. This persistence allows the Consul agent to restore the last known +status of the check across agent restarts. Persisted check status is valid through the +end of the TTL from the time of the last check. + +To manually mark a service unhealthy, +it is far more convenient to use the maintenance mode +[CLI command](/commands/maint) or +[HTTP API endpoint](/api-docs/agent#enable-maintenance-mode) +rather than a TTL health check with arbitrarily high `ttl`. + +The following service definition file snippet is an example +of a TTL check definition: @@ -304,7 +324,24 @@ check = { -A Docker check: +### Docker check ((#docker-interval)) + +These checks depend on periodically invoking an external application that +is packaged within a Docker Container. The application is triggered within the running +container through the Docker Exec API. We expect that the Consul agent user has access +to either the Docker HTTP API or the unix socket. Consul uses `$DOCKER_HOST` to +determine the Docker API endpoint. The application is expected to run, perform a health +check of the service running inside the container, and exit with an appropriate exit code. +The check should be paired with an invocation interval. The shell on which the check +has to be performed is configurable, making it possible to run containers which +have different shells on the same host. +The output of a Docker check is limited to 4KB. +Larger outputs are truncated. +The agent must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) +set to `true` in order to enable Docker health checks. 
+ +The following service definition file snippet is an example +of a Docker check definition: @@ -334,7 +371,26 @@ check = { -A gRPC check for the whole application: +### gRPC check ((##grpc-interval)) + +gRPC checks are intended for applications that support the standard +[gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +The state of the check will be updated by periodically probing the configured endpoint, +waiting `interval` amount of time between attempts. + +By default, gRPC checks are configured with a timeout equal to 10 seconds. +To configure a custom Docker check timeout value, +specify the `timeout` field in the check definition. + +gRPC checks default to not using TLS. +To enable TLS, set `grpc_use_tls` in the check definition. +If TLS is enabled, then by default, a valid TLS certificate is expected. +Certificate verification can be turned off by setting the +`tls_skip_verify` field to `true` in the check definition. +To check on a specific service instead of the whole gRPC server, add the service identifier after the `gRPC` check's endpoint in the following format `/:service_identifier`. + +The following service definition file snippet is an example +of a gRPC check for a whole application: @@ -362,7 +418,8 @@ check = { -A gRPC check for the specific `my_service` service: +The following service definition file snippet is an example +of a gRPC check for the specific `my_service` service @@ -390,7 +447,23 @@ check = { -A h2ping check: +### H2ping check ((#h2ping-interval)) + +H2ping checks test an endpoint that uses http2 by connecting to the endpoint +and sending a ping frame, waiting `interval` amount of time between attempts. +If the ping is successful within a specified timeout, +then the check status is set to `success`. + +By default, h2ping checks are configured with a request timeout equal to 10 seconds. +To configure a custom h2ping check timeout value, +specify the `timeout` field in the check definition. + +TLS is enabled by default. +To disable TLS and use h2c, set `h2ping_use_tls` to `false`. +If TLS is not disabled, a valid certificate is required unless `tls_skip_verify` is set to `true`. + +The following service definition file snippet is an example +of an h2ping check definition: @@ -418,7 +491,29 @@ check = { -An alias check for a local service: +### Alias check + +These checks alias the health state of another registered +node or service. The state of the check updates asynchronously, but is +nearly instant. For aliased services on the same agent, the local state is monitored +and no additional network resources are consumed. For other services and nodes, +the check maintains a blocking query over the agent's connection with a current +server and allows stale requests. If there are any errors in watching the aliased +node or service, the check state is set to `critical`. +For the blocking query, the check uses the ACL token set on the service or check definition. +If no ACL token is set in the service or check definition, +the blocking query uses the agent's default ACL token +([`acl.tokens.default`](/docs/agent/config/config-files#acl_tokens_default)). + +~> **Configuration info**: The alias check configuration expects the alias to be +registered on the same agent as the one you are aliasing. If the service is +not registered with the same agent, `"alias_node": ""` must also be +specified. When using `alias_node`, if no service is specified, the check will +alias the health of the node. 
If a service is specified, the check will alias +the specified service on this particular node. + +The following service definition file snippet is an example +of an alias check for a local service: @@ -440,72 +535,137 @@ check = { -~> Configuration info: The alias check configuration expects the alias to be -registered on the same agent as the one you are aliasing. If the service is -not registered with the same agent, `"alias_node": ""` must also be -specified. When using `alias_node`, if no service is specified, the check will -alias the health of the node. If a service is specified, the check will alias -the specified service on this particular node. +## Check definition -Each type of definition must include a `name` and may optionally provide an -`id` and `notes` field. The `id` must be unique per _agent_ otherwise only the -last defined check with that `id` will be registered. If the `id` is not set -and the check is embedded within a service definition a unique check id is -generated. Otherwise, `id` will be set to `name`. If names might conflict, -unique IDs should be provided. +This section covers some of the most common options for check definitions. +For a complete list of all check options, refer to the +[Register Check HTTP API endpoint documentation](/api-docs/agent/check#json-request-body-schema). -The `notes` field is opaque to Consul but can be used to provide a human-readable -description of the current state of the check. Similarly, an external process -updating a TTL check via the HTTP interface can set the `notes` value. +-> **Casing for check options:** + The correct casing for an option depends on whether the check is defined in + a service definition file or an HTTP API JSON request body. + For example, the option `deregister_critical_service_after` in a service + definition file is instead named `DeregisterCriticalServiceAfter` in an + HTTP API JSON request body. -Checks may also contain a `token` field to provide an ACL token. This token is -used for any interaction with the catalog for the check, including -[anti-entropy syncs](/docs/architecture/anti-entropy) and deregistration. -For Alias checks, this token is used if a remote blocking query is necessary -to watch the state of the aliased node or service. +#### General options -Script, TCP, UDP, HTTP, Docker, and gRPC checks must include an `interval` field. This -field is parsed by Go's `time` package, and has the following -[formatting specification](https://golang.org/pkg/time/#ParseDuration): +- `name` `(string: )` - Specifies the name of the check. -> A duration string is a possibly signed sequence of decimal numbers, each with -> optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". -> Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +- `id` `(string: "")` - Specifies a unique ID for this check on this node. + + If unspecified, Consul defines the check id by: + - If the check definition is embedded within a service definition file, + a unique check id is auto-generated. + - Otherwise, the `id` is set to the value of `name`. + If names might conflict, you must provide unique IDs to avoid + overwriting existing checks with the same id on this node. -In Consul 0.7 and later, checks that are associated with a service may also contain -an optional `deregister_critical_service_after` field, which is a timeout in the -same Go time format as `interval` and `ttl`. 
If a check is in the critical state
-for more than this configured value, then its associated service (and all of its
-associated checks) will automatically be deregistered. The minimum timeout is 1
-minute, and the process that reaps critical services runs every 30 seconds, so it
-may take slightly longer than the configured timeout to trigger the deregistration.
-This should generally be configured with a timeout that's much, much longer than
-any expected recoverable outage for the given service.
+- `interval` `(string: )` - Specifies
+  the frequency at which to run this check.
+  Required for all check types except TTL and alias checks.

-To configure a check, either provide it as a `-config-file` option to the
-agent or place it inside the `-config-dir` of the agent. The file must
-end in a ".json" or ".hcl" extension to be loaded by Consul. Check definitions
-can also be updated by sending a `SIGHUP` to the agent. Alternatively, the
-check can be registered dynamically using the [HTTP API](/api).
+  The value is parsed by Go's `time` package, and has the following
+  [formatting specification](https://golang.org/pkg/time/#ParseDuration):

-## Check Scripts
+  > A duration string is a possibly signed sequence of decimal numbers, each with
+  > optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
+  > Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".

-A check script is generally free to do anything to determine the status
-of the check. The only limitations placed are that the exit codes must obey
-this convention:
+- `service_id` `(string: )` - Specifies
+  the ID of a service instance to associate this check with.
+  That service instance must be on this node.
+  If not specified, this check is treated as a node-level check.
+  For more information, refer to the
+  [service-bound checks](#service-bound-checks) section.

-- Exit code 0 - Check is passing
-- Exit code 1 - Check is warning
-- Any other code - Check is failing
+- `status` `(string: "")` - Specifies the initial status of the health check as
+  "critical" (default), "warning", or "passing". For more details, refer to
+  the [initial health check status](#initial-health-check-status) section.
+
+  -> **Health defaults to critical:** If health status is not initially specified,
+  it defaults to "critical" to protect against including a service
+  in discovery results before it is ready.

-This is the only convention that Consul depends on. Any output of the script
-will be captured and stored in the `output` field.
+- `deregister_critical_service_after` `(string: "")` - If specified,
+  the associated service and all its checks are deregistered
+  after this check is in the critical state for more than the specified value.
+  The value has the same formatting specification as the [`interval`](#interval) field.

-In Consul 0.9.0 and later, the agent must be configured with
-[`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) set to `true`
-in order to enable script checks.
+  The minimum timeout is 1 minute,
+  and the process that reaps critical services runs every 30 seconds,
+  so it may take slightly longer than the configured timeout to trigger the deregistration.
+  This field should generally be configured with a timeout that's significantly longer than
+  any expected recoverable outage for the given service.

-## Initial Health Check Status
+- `notes` `(string: "")` - Provides a human-readable description of the check. 
+ This field is opaque to Consul and can be used however is useful to the user. + For example, it could be used to describe the current state of the check. + +- `token` `(string: "")` - Specifies an ACL token used for any interaction + with the catalog for the check, including + [anti-entropy syncs](/docs/architecture/anti-entropy) and deregistration. + + For alias checks, this token is used if a remote blocking query is necessary to watch the state of the aliased node or service. + +#### Success/failures before passing/warning/critical + +To prevent flapping health checks and limit the load they cause on the cluster, +a health check may be configured to become passing/warning/critical only after a +specified number of consecutive checks return as passing/critical. +The status does not transition states until the configured threshold is reached. + +- `success_before_passing` - Number of consecutive successful results required + before check status transitions to passing. Defaults to `0`. Added in Consul 1.7.0. + +- `failures_before_warning` - Number of consecutive unsuccessful results required + before check status transitions to warning. Defaults to the same value as that of + `failures_before_critical` to maintain the expected behavior of not changing the + status of service checks to `warning` before `critical` unless configured to do so. + Values higher than `failures_before_critical` are invalid. Added in Consul 1.11.0. + +- `failures_before_critical` - Number of consecutive unsuccessful results required + before check status transitions to critical. Defaults to `0`. Added in Consul 1.7.0. + +This feature is available for all check types except TTL and alias checks. +By default, both passing and critical thresholds are set to 0 so the check +status always reflects the last check result. + + + +```hcl +checks = [ + { + name = "HTTP TCP on port 80" + tcp = "localhost:80" + interval = "10s" + timeout = "1s" + success_before_passing = 3 + failures_before_warning = 1 + failures_before_critical = 3 + } +] +``` + +```json +{ + "checks": [ + { + "name": "HTTP TCP on port 80", + "tcp": "localhost:80", + "interval": "10s", + "timeout": "1s", + "success_before_passing": 3, + "failures_before_warning": 1, + "failures_before_critical": 3 + } + ] +} +``` + + + +## Initial health check status By default, when checks are registered against a Consul agent, the state is set immediately to "critical". This is useful to prevent services from being @@ -576,13 +736,13 @@ In the above configuration, if the web-app health check begins failing, it will only affect the availability of the web-app service. All other services provided by the node will remain unchanged. -## Agent Certificates for TLS Checks +## Agent certificates for TLS checks The [enable_agent_tls_for_checks](/docs/agent/config/config-files#enable_agent_tls_for_checks) agent configuration option can be utilized to have HTTP or gRPC health checks to use the agent's credentials when configured for TLS. -## Multiple Check Definitions +## Multiple check definitions Multiple check definitions can be defined using the `checks` (plural) key in your configuration file. @@ -640,58 +800,3 @@ checks = [ ``` - -## Success/Failures before passing/warning/critical - -To prevent flapping health checks, and limit the load they cause on the cluster, -a health check may be configured to become passing/warning/critical only after a -specified number of consecutive checks return passing/critical. 
-The status will not transition states until the configured threshold is reached. - -- `success_before_passing` - Number of consecutive successful results required - before check status transitions to passing. Defaults to `0`. Added in Consul 1.7.0. -- `failures_before_warning` - Number of consecutive unsuccessful results required - before check status transitions to warning. Defaults to the same value as that of - `failures_before_critical` to maintain the expected behavior of not changing the - status of service checks to `warning` before `critical` unless configured to do so. - Values higher than `failures_before_critical` are invalid. Added in Consul 1.11.0. -- `failures_before_critical` - Number of consecutive unsuccessful results required - before check status transitions to critical. Defaults to `0`. Added in Consul 1.7.0. - -This feature is available for HTTP, TCP, gRPC, Docker & Monitor checks. -By default, both passing and critical thresholds will be set to 0 so the check -status will always reflect the last check result. - - - -```hcl -checks = [ - { - name = "HTTP TCP on port 80" - tcp = "localhost:80" - interval = "10s" - timeout = "1s" - success_before_passing = 3 - failures_before_warning = 1 - failures_before_critical = 3 - } -] -``` - -```json -{ - "checks": [ - { - "name": "HTTP TCP on port 80", - "tcp": "localhost:80", - "interval": "10s", - "timeout": "1s", - "success_before_passing": 3, - "failures_before_warning": 1, - "failures_before_critical": 3 - } - ] -} -``` - - From ca508de2e6ee25d0ad60f5728b4a455f4d90cd21 Mon Sep 17 00:00:00 2001 From: Dao Thanh Tung Date: Fri, 26 Aug 2022 06:21:49 +0800 Subject: [PATCH 102/104] Fix Consul KV CLI 'GET' flags 'keys' and 'recurse' to be set together (#13493) allow flags -recurse and -keys to be run at the same time in consul kv get CLI --- .changelog/13493.txt | 3 ++ command/kv/get/kv_get.go | 36 +++++++++++-- command/kv/get/kv_get_test.go | 99 +++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 5 deletions(-) create mode 100644 .changelog/13493.txt diff --git a/.changelog/13493.txt b/.changelog/13493.txt new file mode 100644 index 000000000..9c3eec605 --- /dev/null +++ b/.changelog/13493.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix Consul kv CLI 'GET' flags 'keys' and 'recurse' to be set together +``` diff --git a/command/kv/get/kv_get.go b/command/kv/get/kv_get.go index 099aedb9f..aa93ef963 100644 --- a/command/kv/get/kv_get.go +++ b/command/kv/get/kv_get.go @@ -99,6 +99,32 @@ func (c *cmd) Run(args []string) int { } switch { + case c.keys && c.recurse: + pairs, _, err := client.KV().List(key, &api.QueryOptions{ + AllowStale: c.http.Stale(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err)) + return 1 + } + + for i, pair := range pairs { + if c.detailed { + var b bytes.Buffer + if err := prettyKVPair(&b, pair, false, true); err != nil { + c.UI.Error(fmt.Sprintf("Error rendering KV key: %s", err)) + return 1 + } + c.UI.Info(b.String()) + + if i < len(pairs)-1 { + c.UI.Info("") + } + } else { + c.UI.Info(fmt.Sprintf("%s", pair.Key)) + } + } + return 0 case c.keys: keys, _, err := client.KV().Keys(key, c.separator, &api.QueryOptions{ AllowStale: c.http.Stale(), @@ -125,7 +151,7 @@ func (c *cmd) Run(args []string) int { for i, pair := range pairs { if c.detailed { var b bytes.Buffer - if err := prettyKVPair(&b, pair, c.base64encode); err != nil { + if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil { c.UI.Error(fmt.Sprintf("Error rendering KV pair: 
%s", err)) return 1 } @@ -161,7 +187,7 @@ func (c *cmd) Run(args []string) int { if c.detailed { var b bytes.Buffer - if err := prettyKVPair(&b, pair, c.base64encode); err != nil { + if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil { c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err)) return 1 } @@ -187,7 +213,7 @@ func (c *cmd) Help() string { return c.help } -func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error { +func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool, keysOnly bool) error { tw := tabwriter.NewWriter(w, 0, 2, 6, ' ', 0) fmt.Fprintf(tw, "CreateIndex\t%d\n", pair.CreateIndex) fmt.Fprintf(tw, "Flags\t%d\n", pair.Flags) @@ -205,9 +231,9 @@ func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error { if pair.Namespace != "" { fmt.Fprintf(tw, "Namespace\t%s\n", pair.Namespace) } - if base64EncodeValue { + if !keysOnly && base64EncodeValue { fmt.Fprintf(tw, "Value\t%s", base64.StdEncoding.EncodeToString(pair.Value)) - } else { + } else if !keysOnly { fmt.Fprintf(tw, "Value\t%s", pair.Value) } return tw.Flush() diff --git a/command/kv/get/kv_get_test.go b/command/kv/get/kv_get_test.go index 3a7b12d8a..5143391ef 100644 --- a/command/kv/get/kv_get_test.go +++ b/command/kv/get/kv_get_test.go @@ -418,3 +418,102 @@ func TestKVGetCommand_DetailedBase64(t *testing.T) { t.Fatalf("bad %#v, value is not base64 encoded", output) } } + +func TestKVGetCommand_KeysRecurse(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := agent.NewTestAgent(t, ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + keys := map[string]string{ + "foo/": "", + "foo/a": "Hello World 2", + "foo1/a": "Hello World 1", + } + for k, v := range keys { + var pair *api.KVPair + switch v { + case "": + pair = &api.KVPair{Key: k, Value: nil} + default: + pair = &api.KVPair{Key: k, Value: []byte(v)} + } + if _, err := client.KV().Put(pair, nil); err != nil { + t.Fatalf("err: %#v", err) + } + } + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-recurse", + "-keys", + "foo", + } + + code := c.Run(args) + if code != 0 { + t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) + } + output := ui.OutputWriter.String() + for key, value := range keys { + if !strings.Contains(output, key) { + t.Fatalf("bad %#v missing %q", output, key) + } + if strings.Contains(output, key+":"+value) { + t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value) + } + } +} +func TestKVGetCommand_DetailedKeysRecurse(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := agent.NewTestAgent(t, ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + keys := map[string]string{ + "foo/": "", + "foo/a": "Hello World 2", + "foo1/a": "Hello World 1", + } + for k, v := range keys { + var pair *api.KVPair + switch v { + case "": + pair = &api.KVPair{Key: k, Value: nil} + default: + pair = &api.KVPair{Key: k, Value: []byte(v)} + } + if _, err := client.KV().Put(pair, nil); err != nil { + t.Fatalf("err: %#v", err) + } + } + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-recurse", + "-keys", + "-detailed", + "foo", + } + + code := c.Run(args) + if code != 0 { + t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) + } + output := ui.OutputWriter.String() + for key, value := range keys { + if value != "" && strings.Contains(output, value) { + t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value) + } + } +} From f64af3be2423da5260a65ff552dcf931d73f5809 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:32:59 -0700 Subject: [PATCH 103/104] peering: add peer health metric (#14004) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/consul/leader_peering.go | 36 ++++-- agent/consul/leader_peering_test.go | 32 +++++ agent/consul/server.go | 1 + .../services/peerstream/server.go | 8 +- .../services/peerstream/stream_resources.go | 5 +- .../services/peerstream/stream_test.go | 52 +++++--- .../services/peerstream/stream_tracker.go | 62 +++++++++- .../peerstream/stream_tracker_test.go | 113 ++++++++++++++++-- 8 files changed, 262 insertions(+), 47 deletions(-) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index d1823b026..00128bcd8 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -31,11 +31,18 @@ import ( ) var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"} +var leaderHealthyPeeringKey = []string{"consul", "peering", "healthy"} var LeaderPeeringMetrics = []prometheus.GaugeDefinition{ { Name: leaderExportedServicesCountKey, Help: "A gauge that tracks how many services are exported for the peering. " + - "The labels are \"peering\" and, for enterprise, \"partition\". " + + "The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " + + "We emit this metric every 9 seconds", + }, + { + Name: leaderHealthyPeeringKey, + Help: "A gauge that tracks how if a peering is healthy (1) or not (0). " + + "The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". 
" + "We emit this metric every 9 seconds", }, } @@ -85,13 +92,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric } for _, peer := range peers { - status, found := s.peerStreamServer.StreamStatus(peer.ID) - if !found { - logger.Trace("did not find status for", "peer_name", peer.Name) - continue - } - - esc := status.GetExportedServicesCount() part := peer.Partition labels := []metrics.Label{ {Name: "peer_name", Value: peer.Name}, @@ -101,7 +101,25 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric labels = append(labels, metrics.Label{Name: "partition", Value: part}) } - metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels) + status, found := s.peerStreamServer.StreamStatus(peer.ID) + if found { + // exported services count metric + esc := status.GetExportedServicesCount() + metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels) + } + + // peering health metric + if status.NeverConnected { + metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels) + } else { + healthy := status.IsHealthy() + healthyInt := 0 + if healthy { + healthyInt = 1 + } + + metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels) + } } return nil diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 46a74b6ad..d41930385 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "testing" "time" @@ -974,6 +975,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { var ( s2PeerID1 = generateUUID() s2PeerID2 = generateUUID() + s2PeerID3 = generateUUID() testContextTimeout = 60 * time.Second lastIdx = uint64(0) ) @@ -1063,6 +1065,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { // mimic tracking exported services mst2.TrackExportedService(structs.ServiceName{Name: "d-service"}) mst2.TrackExportedService(structs.ServiceName{Name: "e-service"}) + + // pretend that the hearbeat happened + mst2.TrackRecvHeartbeat() + } + + // Simulate a peering that never connects + { + p3 := &pbpeering.Peering{ + ID: s2PeerID3, + Name: "my-peer-s4", + PeerID: token.PeerID, // doesn't much matter what these values are + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p3.ShouldDial()) + lastIdx++ + require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3})) } // set up a metrics sink @@ -1092,6 +1112,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2)) require.Equal(r, float32(2), metric2.Value) // for d, e services + + keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2) + healthyMetric2, ok := intv.Gauges[keyHealthyMetric2] + require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2)) + + require.Equal(r, float32(1), healthyMetric2.Value) + + keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3) + healthyMetric3, ok := intv.Gauges[keyHealthyMetric3] + require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3)) + + require.True(r, math.IsNaN(float64(healthyMetric3.Value))) }) } diff --git a/agent/consul/server.go b/agent/consul/server.go 
index 1afa74c91..8f2986c3e 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -742,6 +742,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser return s.ForwardGRPC(s.grpcConnPool, info, fn) }, }) + s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout) s.peerStreamServer.Register(s.externalGRPCServer) // Initialize internal gRPC server. diff --git a/agent/grpc-external/services/peerstream/server.go b/agent/grpc-external/services/peerstream/server.go index 7254c60c7..6568d7bf8 100644 --- a/agent/grpc-external/services/peerstream/server.go +++ b/agent/grpc-external/services/peerstream/server.go @@ -42,8 +42,8 @@ type Config struct { // outgoingHeartbeatInterval is how often we send a heartbeat. outgoingHeartbeatInterval time.Duration - // incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. - incomingHeartbeatTimeout time.Duration + // IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. + IncomingHeartbeatTimeout time.Duration } //go:generate mockery --name ACLResolver --inpackage @@ -63,8 +63,8 @@ func NewServer(cfg Config) *Server { if cfg.outgoingHeartbeatInterval == 0 { cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval } - if cfg.incomingHeartbeatTimeout == 0 { - cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout + if cfg.IncomingHeartbeatTimeout == 0 { + cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout } return &Server{ Config: cfg, diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 657972b88..0e6b28f45 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { // incomingHeartbeatCtx will complete if incoming heartbeats time out. incomingHeartbeatCtx, incomingHeartbeatCtxCancel := - context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) + context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout) // NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're // re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned // value, not the current value. @@ -605,7 +605,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { // They just can't trace the execution properly for some reason (possibly golang/go#29587). 
//nolint:govet incomingHeartbeatCtx, incomingHeartbeatCtxCancel = - context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) + context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout) } case update := <-subCh: @@ -642,6 +642,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { if err := streamSend(replResp); err != nil { return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err) } + status.TrackSendSuccess() } } } diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 49ba7be04..be4a44ec8 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -572,7 +572,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) }) - var lastSendSuccess time.Time + var lastSendAck, lastSendSuccess time.Time testutil.RunStep(t, "ack tracked as success", func(t *testing.T) { ack := &pbpeerstream.ReplicationMessage{ @@ -587,19 +587,22 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, } - lastSendSuccess = it.FutureNow(1) + lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC) + lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC) err := client.Send(ack) require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSendSuccess, + Connected: true, + LastAck: lastSendAck, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { - status, ok := srv.StreamStatus(testPeerID) + rStatus, ok := srv.StreamStatus(testPeerID) require.True(r, ok) - require.Equal(r, expect, status) + require.Equal(r, expect, rStatus) }) }) @@ -621,23 +624,26 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, } - lastNack = it.FutureNow(1) + lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC) + lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC) err := client.Send(nack) require.NoError(t, err) lastNackMsg = "client peer was unable to apply resource: bad bad not good" expect := Status{ - Connected: true, - LastAck: lastSendSuccess, - LastNack: lastNack, - LastNackMessage: lastNackMsg, + Connected: true, + LastAck: lastSendAck, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { - status, ok := srv.StreamStatus(testPeerID) + rStatus, ok := srv.StreamStatus(testPeerID) require.True(r, ok) - require.Equal(r, expect, status) + require.Equal(r, expect, rStatus) }) }) @@ -694,13 +700,15 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, LastRecvResourceSuccess: lastRecvResourceSuccess, ImportedServices: map[string]struct{}{ api.String(): {}, }, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -753,7 +761,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, LastRecvResourceSuccess: lastRecvResourceSuccess, @@ -762,6 +770,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, 
+ heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -785,7 +795,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, LastRecvResourceSuccess: lastRecvResourceSuccess, @@ -795,6 +805,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -816,7 +828,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: false, DisconnectErrorMessage: lastRecvErrorMsg, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, DisconnectTime: disconnectTime, @@ -827,6 +839,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -1129,7 +1143,7 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) { srv, store := newTestServer(t, func(c *Config) { c.Tracker.SetClock(it.Now) - c.incomingHeartbeatTimeout = 5 * time.Millisecond + c.IncomingHeartbeatTimeout = 5 * time.Millisecond }) p := writePeeringToBeDialed(t, store, 1, "my-peer") @@ -1236,7 +1250,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) { srv, store := newTestServer(t, func(c *Config) { c.Tracker.SetClock(it.Now) - c.incomingHeartbeatTimeout = incomingHeartbeatTimeout + c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout }) p := writePeeringToBeDialed(t, store, 1, "my-peer") diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index f7a451595..ffde98ba3 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -16,6 +16,8 @@ type Tracker struct { // timeNow is a shim for testing. timeNow func() time.Time + + heartbeatTimeout time.Duration } func NewTracker() *Tracker { @@ -33,6 +35,12 @@ func (t *Tracker) SetClock(clock func() time.Time) { } } +func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) { + t.mu.Lock() + defer t.mu.Unlock() + t.heartbeatTimeout = heartbeatTimeout +} + // Register a stream for a given peer but do not mark it as connected. func (t *Tracker) Register(id string) (*MutableStatus, error) { t.mu.Lock() @@ -44,7 +52,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) { func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) { status, ok := t.streams[id] if !ok { - status = newMutableStatus(t.timeNow, initAsConnected) + status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected) t.streams[id] = status return status, true, nil } @@ -101,7 +109,9 @@ func (t *Tracker) StreamStatus(id string) (resp Status, found bool) { s, ok := t.streams[id] if !ok { - return Status{}, false + return Status{ + NeverConnected: true, + }, false } return s.GetStatus(), true } @@ -142,9 +152,14 @@ type MutableStatus struct { // Status contains information about the replication stream to a peer cluster. // TODO(peering): There's a lot of fields here... 
type Status struct { + heartbeatTimeout time.Duration + // Connected is true when there is an open stream for the peer. Connected bool + // NeverConnected is true for peerings that have never connected, false otherwise. + NeverConnected bool + // DisconnectErrorMessage tracks the error that caused the stream to disconnect non-gracefully. // If the stream is connected or it disconnected gracefully it will be empty. DisconnectErrorMessage string @@ -167,6 +182,9 @@ type Status struct { // LastSendErrorMessage tracks the last error message when sending into the stream. LastSendErrorMessage string + // LastSendSuccess tracks the time of the last success response sent into the stream. + LastSendSuccess time.Time + // LastRecvHeartbeat tracks when we last received a heartbeat from our peer. LastRecvHeartbeat time.Time @@ -196,10 +214,40 @@ func (s *Status) GetExportedServicesCount() uint64 { return uint64(len(s.ExportedServices)) } -func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { +// IsHealthy is a convenience func that returns true/ false for a peering status. +// We define a peering as unhealthy if its status satisfies one of the following: +// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout +// - If the last sent error is newer than last sent success +// - If the last received error is newer than last received success +// If none of these conditions apply, we call the peering healthy. +func (s *Status) IsHealthy() bool { + if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout { + // 1. If heartbeat hasn't been received for a while - report unhealthy + return false + } + + if s.LastSendError.After(s.LastSendSuccess) { + // 2. If last sent error is newer than last sent success - report unhealthy + return false + } + + if s.LastRecvError.After(s.LastRecvResourceSuccess) { + // 3. If last recv error is newer than last recv success - report unhealthy + return false + } + + return true +} + +func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus { + if heartbeatTimeout.Microseconds() == 0 { + heartbeatTimeout = defaultIncomingHeartbeatTimeout + } return &MutableStatus{ Status: Status{ - Connected: connected, + Connected: connected, + heartbeatTimeout: heartbeatTimeout, + NeverConnected: !connected, }, timeNow: now, doneCh: make(chan struct{}), @@ -223,6 +271,12 @@ func (s *MutableStatus) TrackSendError(error string) { s.mu.Unlock() } +func (s *MutableStatus) TrackSendSuccess() { + s.mu.Lock() + s.LastSendSuccess = s.timeNow().UTC() + s.mu.Unlock() +} + // TrackRecvResourceSuccess tracks receiving a replicated resource. 
func (s *MutableStatus) TrackRecvResourceSuccess() { s.mu.Lock() diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go index f7a9df321..8cdcbc79a 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker_test.go +++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go @@ -10,6 +10,97 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) +const ( + aPeerID = "63b60245-c475-426b-b314-4588d210859d" +) + +func TestStatus_IsHealthy(t *testing.T) { + type testcase struct { + name string + dontConnect bool + modifierFunc func(status *MutableStatus) + expectedVal bool + heartbeatTimeout time.Duration + } + + tcs := []testcase{ + { + name: "never connected, unhealthy", + expectedVal: false, + dontConnect: true, + }, + { + name: "no heartbeat, unhealthy", + expectedVal: false, + }, + { + name: "heartbeat is not received, unhealthy", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second) + }, + heartbeatTimeout: 1 * time.Second, + }, + { + name: "send error before send success", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now() + + status.LastSendSuccess = time.Now() + status.LastSendError = time.Now() + }, + }, + { + name: "received error before received success", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now() + + status.LastRecvResourceSuccess = time.Now() + status.LastRecvError = time.Now() + }, + }, + { + name: "healthy", + expectedVal: true, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now() + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + tracker := NewTracker() + if tc.heartbeatTimeout.Microseconds() != 0 { + tracker.SetHeartbeatTimeout(tc.heartbeatTimeout) + } + + if !tc.dontConnect { + st, err := tracker.Connected(aPeerID) + require.NoError(t, err) + require.True(t, st.Connected) + + if tc.modifierFunc != nil { + tc.modifierFunc(st) + } + + require.Equal(t, tc.expectedVal, st.IsHealthy()) + + } else { + st, found := tracker.StreamStatus(aPeerID) + require.False(t, found) + require.Equal(t, tc.expectedVal, st.IsHealthy()) + } + }) + } +} + func TestTracker_EnsureConnectedDisconnected(t *testing.T) { tracker := NewTracker() peerID := "63b60245-c475-426b-b314-4588d210859d" @@ -29,7 +120,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, + Connected: true, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, } status, ok := tracker.StreamStatus(peerID) @@ -55,8 +147,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() expect := Status{ - Connected: true, - LastAck: lastSuccess, + Connected: true, + LastAck: lastSuccess, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, } require.Equal(t, expect, status) }) @@ -66,9 +159,10 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { sequence++ expect := Status{ - Connected: false, - DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), - LastAck: lastSuccess, + Connected: false, + DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), + LastAck: lastSuccess, + heartbeatTimeout: 
defaultIncomingHeartbeatTimeout, } status, ok := tracker.StreamStatus(peerID) require.True(t, ok) @@ -80,8 +174,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSuccess, + Connected: true, + LastAck: lastSuccess, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, // DisconnectTime gets cleared on re-connect. } @@ -96,7 +191,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { status, ok := tracker.StreamStatus(peerID) require.False(t, ok) - require.Zero(t, status) + require.Equal(t, Status{NeverConnected: true}, status) }) } From dc25f0deea7c4a7430552ffde79014f45070f497 Mon Sep 17 00:00:00 2001 From: Tu Nguyen Date: Thu, 25 Aug 2022 20:25:35 -0700 Subject: [PATCH 104/104] Update redirects --- website/redirects.js | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/website/redirects.js b/website/redirects.js index a4b3f272b..ff851092c 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1288,4 +1288,24 @@ module.exports = [ destination: '/docs/api-gateway/upgrades', permanent: true, }, + { + source: '/docs/api-gateway/upgrade-specific-versions', + destination: '/docs/api-gateway/upgrades', + permanent: true, + }, + { + source: '/docs/intro/usecases/what-is-service-discovery', + destination: '/docs/concepts/service-discovery', + permanent: true, + }, + { + source: '/docs/intro/usecases/what-is-a-service-mesh', + destination: '/docs/concepts/service-mesh', + permanent: true, + }, + { + source: '/docs/download-tools', + destination: '/docs/integrate/download-tools', + permanent: true, + } ]
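
The peering patch above (commit f64af3be) drives the new `consul.peering.healthy` gauge from `Status.IsHealthy()`: NaN for a peering that never connected, otherwise 1 or 0. The following is a minimal, self-contained Go sketch of that decision logic as described in the diff; the `peerStatus` type, function names, and example timeout here are illustrative stand-ins and not the actual Consul internals.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// peerStatus is an illustrative stand-in for the tracker status fields used
// by the health evaluation in the patch; it is not the real Consul type.
type peerStatus struct {
	neverConnected    bool
	lastRecvHeartbeat time.Time
	lastSendSuccess   time.Time
	lastSendError     time.Time
	lastRecvSuccess   time.Time
	lastRecvError     time.Time
}

// healthy mirrors the three unhealthy conditions from the patch: a stale
// heartbeat, a send error newer than the last send success, or a receive
// error newer than the last receive success.
func healthy(s peerStatus, heartbeatTimeout time.Duration, now time.Time) bool {
	if now.Sub(s.lastRecvHeartbeat) > heartbeatTimeout {
		return false
	}
	if s.lastSendError.After(s.lastSendSuccess) {
		return false
	}
	if s.lastRecvError.After(s.lastRecvSuccess) {
		return false
	}
	return true
}

// gaugeValue converts a status into the value emitted for the
// consul.peering.healthy gauge: NaN when the peering never connected,
// otherwise 1 for healthy and 0 for unhealthy.
func gaugeValue(s peerStatus, heartbeatTimeout time.Duration, now time.Time) float32 {
	if s.neverConnected {
		return float32(math.NaN())
	}
	if healthy(s, heartbeatTimeout, now) {
		return 1
	}
	return 0
}

func main() {
	now := time.Now()
	ok := peerStatus{lastRecvHeartbeat: now.Add(-5 * time.Second), lastSendSuccess: now, lastRecvSuccess: now}
	stale := peerStatus{lastRecvHeartbeat: now.Add(-5 * time.Minute)}
	never := peerStatus{neverConnected: true}

	fmt.Println(gaugeValue(ok, 2*time.Minute, now))    // 1
	fmt.Println(gaugeValue(stale, 2*time.Minute, now)) // 0
	fmt.Println(gaugeValue(never, 2*time.Minute, now)) // NaN
}
```

Under these assumed semantics, a dashboard can treat NaN as "peering defined but never connected", which keeps it distinguishable from an unhealthy (0) peering.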