xds: tcp services using the discovery chain should not assume RDS during LDS (#6623)

Previously, the logic that configures RDS (Route Discovery Service) during
LDS (Listener Discovery Service) for L7 upstreams was overapplied to TCP
proxies, resulting in an empty cluster name ("") being set on the generated
envoy.tcp_proxy filter.

Fixes #6621
Author: R.B. Boyer, 2019-10-17 16:44:59 -05:00 (committed by GitHub)
Parent: 2b16f176b3
Commit: 1ab04a8b6a
15 changed files with 111 additions and 5 deletions
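
For context: in Envoy, only an L7 filter chain (http_connection_manager) can
defer route-to-cluster resolution to RDS, while a tcp_proxy filter must be
handed a concrete cluster name at listener-build (LDS) time. A minimal sketch
of that rule, using a hypothetical helper name and Consul's protocol names:

// listenerNeedsRDS is an illustrative sketch, not code from this commit:
// only L7 protocols may leave the cluster unnamed during LDS and let RDS
// supply routes later; anything else is proxied as raw TCP and needs its
// cluster name immediately.
func listenerNeedsRDS(proto string) bool {
	switch proto {
	case "http", "http2", "grpc":
		return true // http_connection_manager can reference an RDS route config
	default:
		return false // envoy.tcp_proxy requires a non-empty "cluster" field
	}
}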


@@ -577,8 +577,23 @@ func (s *Server) makeUpstreamListenerForDiscoveryChain(
 		proto = "tcp"
 	}
 
+	useRDS := true
+	clusterName := ""
+	if proto == "tcp" {
+		startNode := chain.Nodes[chain.StartNode]
+		if startNode == nil {
+			panic("missing first node in compiled discovery chain for: " + chain.ServiceName)
+		} else if startNode.Type != structs.DiscoveryGraphNodeTypeResolver {
+			panic(fmt.Sprintf("unexpected first node in discovery chain using protocol=%q: %s", proto, startNode.Type))
+		}
+		targetID := startNode.Resolver.Target
+		target := chain.Targets[targetID]
+		clusterName = CustomizeClusterName(target.Name, chain)
+		useRDS = false
+	}
+
 	filter, err := makeListenerFilter(
-		true, proto, upstreamID, "", "upstream_", "", false)
+		useRDS, proto, upstreamID, clusterName, "upstream_", "", false)
 	if err != nil {
 		return nil, err
 	}
@@ -607,6 +622,11 @@ func makeListenerFilter(
 	case "tcp":
 		fallthrough
 	default:
+		if useRDS {
+			return envoylistener.Filter{}, fmt.Errorf("RDS is not compatible with the tcp proxy filter")
+		} else if cluster == "" {
+			return envoylistener.Filter{}, fmt.Errorf("cluster name is required for a tcp proxy filter")
+		}
 		return makeTCPProxyFilter(filterName, cluster, statPrefix)
 	}
 }
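
With the guard above, a misconfigured tcp filter chain now fails fast at
listener generation instead of silently emitting a tcp_proxy with an empty
cluster. A standalone sketch of the two error paths (hypothetical function
name, simplified to return only an error):

import "fmt"

// validateTCPFilterArgs mirrors the checks added above, purely as an
// illustration of the accepted input space.
func validateTCPFilterArgs(useRDS bool, cluster string) error {
	if useRDS {
		// tcp_proxy has no route table, so RDS can never apply to it.
		return fmt.Errorf("RDS is not compatible with the tcp proxy filter")
	}
	if cluster == "" {
		// This is the case that previously slipped through as "cluster": "".
		return fmt.Errorf("cluster name is required for a tcp proxy filter")
	}
	return nil
}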


@@ -16,7 +16,7 @@
       {
         "name": "envoy.tcp_proxy",
         "config": {
-          "cluster": "",
+          "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
           "stat_prefix": "upstream_db_tcp"
         }
       }


@@ -16,7 +16,7 @@
       {
         "name": "envoy.tcp_proxy",
         "config": {
-          "cluster": "",
+          "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
           "stat_prefix": "upstream_db_tcp"
         }
       }


@@ -16,7 +16,7 @@
       {
         "name": "envoy.tcp_proxy",
         "config": {
-          "cluster": "",
+          "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
           "stat_prefix": "upstream_db_tcp"
         }
       }


@@ -16,7 +16,7 @@
       {
         "name": "envoy.tcp_proxy",
         "config": {
-          "cluster": "",
+          "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
          "stat_prefix": "upstream_db_tcp"
         }
       }
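
All four golden files change the same way: the tcp_proxy filter now names the
fully qualified upstream cluster instead of "". The value follows Consul's
SNI-style cluster naming, where the UUID segment is the cluster's trust
domain. A hypothetical helper showing the shape of the name (illustrative
only, not part of this change):

import "fmt"

// sniName composes service.namespace.datacenter.internal.<trust-domain>.consul.
func sniName(service, namespace, datacenter, trustDomain string) string {
	return fmt.Sprintf("%s.%s.%s.internal.%s.consul",
		service, namespace, datacenter, trustDomain)
}

// sniName("db", "default", "dc1", "11111111-2222-3333-4444-555555555555")
// yields the exact value expected in the golden files above.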


@@ -0,0 +1,21 @@
enable_central_service_config = true

config_entries {
  bootstrap {
    kind = "proxy-defaults"
    name = "global"

    config {
      protocol = "tcp"
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s2"

    redirect {
      service = "s3"
    }
  }
}
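
These config entries set up the regression test: proxy-defaults forces every
service onto the tcp protocol, and the service-resolver redirects s2 to s3.
s1's upstream still targets s2, so traffic can only reach s3 if the tcp
listener resolved its cluster through the compiled discovery chain, which is
the exact path this commit fixes.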


@@ -0,0 +1,5 @@
services {
  name = "s3"
  port = 8282
  connect { sidecar_service {} }
}


@@ -0,0 +1,11 @@
#!/bin/bash

set -eEuo pipefail

# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2

gen_envoy_bootstrap s1 19000 primary
gen_envoy_bootstrap s2 19001 primary
gen_envoy_bootstrap s3 19002 primary


@@ -0,0 +1,3 @@
#!/bin/bash

export REQUIRED_SERVICES="$DEFAULT_REQUIRED_SERVICES s3 s3-sidecar-proxy"


@@ -0,0 +1,46 @@
#!/usr/bin/env bats

load helpers

@test "s1 proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "s2 proxy admin is up on :19001" {
  retry_default curl -f -s localhost:19001/stats -o /dev/null
}

@test "s3 proxy admin is up on :19002" {
  retry_default curl -f -s localhost:19002/stats -o /dev/null
}

@test "s1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21000 s1
}

@test "s2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21001 s2
}

@test "s3 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21002 s3
}

@test "s3 proxy should be healthy" {
  assert_service_has_healthy_instances s3 1
}

@test "s1 upstream should have healthy endpoints for s3" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s3.default.primary HEALTHY 1
}

@test "s1 upstream should be able to connect to its upstream simply" {
  run retry_default curl -s -f -d hello localhost:5000

  [ "$status" -eq 0 ]
  [ "$output" = "hello" ]
}

@test "s1 upstream should be able to connect to s3 via upstream s2" {
  assert_expected_fortio_name s3
}