From e060748d3f2de28312e8f3e7578614acaf6fe38f Mon Sep 17 00:00:00 2001
From: "R.B. Boyer"
Date: Tue, 23 Jul 2019 20:08:36 -0500
Subject: [PATCH] tests: adding new envoy integration tests for L7 service-resolvers (#6129)

Additionally:

- wait for bootstrap config entries to be applied
- run the verify container in the host's PID namespace so we can kill
  envoys without mounting the docker socket
- assert that we actually send HEALTHY and UNHEALTHY endpoints down in
  EDS during failover
---
 .../connect/envoy/case-badauthz/setup.sh      |   2 +-
 .../connect/envoy/case-badauthz/verify.bats   |   2 +-
 .../connect/envoy/case-basic/verify.bats      |   2 +-
 .../connect/envoy/case-centralconf/setup.sh   |  12 +-
 .../envoy/case-centralconf/verify.bats        |   2 +-
 .../config_entries.hcl                        |  28 ++++
 .../case-cfg-resolver-defaultsubset/s2-v1.hcl |  13 ++
 .../case-cfg-resolver-defaultsubset/s2-v2.hcl |  13 ++
 .../case-cfg-resolver-defaultsubset/setup.sh  |  20 +++
 .../verify.bats                               |  48 ++++++
 .../config_entries.hcl                        |  35 ++++
 .../s3-v1.hcl                                 |  13 ++
 .../s3-v2.hcl                                 |  13 ++
 .../case-cfg-resolver-subset-redirect/s3.hcl  |   8 +
 .../setup.sh                                  |  23 +++
 .../verify.bats                               |  56 +++++++
 .../config_entries.hcl                        |  39 +++++
 .../case-cfg-resolver-svc-failover/s3-v1.hcl  |  13 ++
 .../case-cfg-resolver-svc-failover/s3-v2.hcl  |  13 ++
 .../case-cfg-resolver-svc-failover/s3.hcl     |   8 +
 .../case-cfg-resolver-svc-failover/setup.sh   |  23 +++
 .../verify.bats                               |  75 +++++++++
 .../case-cfg-resolver-svc-redirect/setup.sh   |  17 +-
 .../verify.bats                               |   2 +-
 .../envoy/case-dogstatsd-udp/verify.bats      |   2 +-
 .../connect/envoy/case-grpc/verify.bats       |   2 +-
 .../envoy/case-http-badauthz/verify.bats      |   2 +-
 .../connect/envoy/case-http/verify.bats       |   2 +-
 .../connect/envoy/case-http2/verify.bats      |   2 +-
 .../connect/envoy/case-prometheus/verify.bats |   2 +-
 .../connect/envoy/case-statsd-udp/verify.bats |   2 +-
 .../connect/envoy/case-zipkin/verify.bats     |   2 +-
 .../connect/envoy/docker-compose.yml          | 149 ++++++++++++++++++
 test/integration/connect/envoy/helpers.bash   |  61 +++++--
 34 files changed, 673 insertions(+), 33 deletions(-)
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v1.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v2.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-defaultsubset/verify.bats
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v1.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v2.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-subset-redirect/verify.bats
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v1.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v2.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3.hcl
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh
 create mode 100644 test/integration/connect/envoy/case-cfg-resolver-svc-failover/verify.bats
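A note on the failover assertion mentioned above: the verify steps count
endpoints by EDS health status straight from Envoy's admin API, so the tests
prove what was actually sent down in EDS rather than what Consul intended to
send. A minimal sketch of that check (the admin port and cluster name here are
just the ones these tests happen to use):

    # count endpoints of the "s2" cluster that Envoy currently marks UNHEALTHY
    curl -s localhost:19000/clusters?format=json |
      jq '[.cluster_statuses[]
           | select(.name | startswith("s2.default.dc1.internal."))
           | .host_statuses[].health_status.eds_health_status
           | select(. == "UNHEALTHY")]
          | length'

The helpers.bash changes at the end of this patch implement the same idea with
retries to tolerate xDS propagation delay.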
diff --git a/test/integration/connect/envoy/case-badauthz/setup.sh b/test/integration/connect/envoy/case-badauthz/setup.sh
index daa2b19e0..a811fe4bd 100644
--- a/test/integration/connect/envoy/case-badauthz/setup.sh
+++ b/test/integration/connect/envoy/case-badauthz/setup.sh
@@ -8,4 +8,4 @@ docker_consul intention create -deny s1 s2
 gen_envoy_bootstrap s1 19000
 gen_envoy_bootstrap s2 19001
 
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
\ No newline at end of file
+export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
diff --git a/test/integration/connect/envoy/case-badauthz/verify.bats b/test/integration/connect/envoy/case-badauthz/verify.bats
index b47987558..62b3e489c 100644
--- a/test/integration/connect/envoy/case-badauthz/verify.bats
+++ b/test/integration/connect/envoy/case-badauthz/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should NOT be able to connect to s2" {
diff --git a/test/integration/connect/envoy/case-basic/verify.bats b/test/integration/connect/envoy/case-basic/verify.bats
index 5c41a62bf..d357ef5e9 100644
--- a/test/integration/connect/envoy/case-basic/verify.bats
+++ b/test/integration/connect/envoy/case-basic/verify.bats
@@ -27,7 +27,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
diff --git a/test/integration/connect/envoy/case-centralconf/setup.sh b/test/integration/connect/envoy/case-centralconf/setup.sh
index 12fa68c1d..378a70d03 100644
--- a/test/integration/connect/envoy/case-centralconf/setup.sh
+++ b/test/integration/connect/envoy/case-centralconf/setup.sh
@@ -2,8 +2,12 @@
 
 set -euo pipefail
 
-# retry because resolving the central config might race
-retry_default gen_envoy_bootstrap s1 19000
-retry_default gen_envoy_bootstrap s2 19001
+# wait for bootstrap to apply config entries
+wait_for_config_entry proxy-defaults global
+wait_for_config_entry service-defaults s1
+wait_for_config_entry service-defaults s2
 
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
\ No newline at end of file
+gen_envoy_bootstrap s1 19000
+gen_envoy_bootstrap s2 19001
+
+export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
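A note on the setup change above: wait_for_config_entry blocks until the agent
has actually applied a bootstrapped config entry, instead of retrying
gen_envoy_bootstrap and hoping the race resolves itself. A rough equivalent by
hand, using the plain consul CLI for illustration (retry_default is the
existing 5-attempt/1-second helper from helpers.bash):

    # poll until the entry is readable, then proceed
    retry_default consul config read -kind proxy-defaults -name global >/dev/null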
diff --git a/test/integration/connect/envoy/case-centralconf/verify.bats b/test/integration/connect/envoy/case-centralconf/verify.bats
index 0c414afb0..b8ca7afff 100644
--- a/test/integration/connect/envoy/case-centralconf/verify.bats
+++ b/test/integration/connect/envoy/case-centralconf/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 with http/1.1" {
diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl
new file mode 100644
index 000000000..e61e6bda9
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/config_entries.hcl
@@ -0,0 +1,28 @@
+enable_central_service_config = true
+
+config_entries {
+  bootstrap {
+    kind = "proxy-defaults"
+    name = "global"
+
+    config {
+      protocol = "http"
+    }
+  }
+
+  bootstrap {
+    kind = "service-resolver"
+    name = "s2"
+    default_subset = "v2"
+
+    subsets = {
+      "v1" = {
+        filter = "Service.Meta.version == v1"
+      }
+
+      "v2" = {
+        filter = "Service.Meta.version == v2"
+      }
+    }
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v1.hcl b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v1.hcl
new file mode 100644
index 000000000..dd84f5376
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v1.hcl
@@ -0,0 +1,13 @@
+services {
+  id = "s2-v1"
+  name = "s2"
+  port = 8182
+
+  meta {
+    version = "v1"
+  }
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v2.hcl b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v2.hcl
new file mode 100644
index 000000000..b15ca6c4e
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/s2-v2.hcl
@@ -0,0 +1,13 @@
+services {
+  id = "s2-v2"
+  name = "s2"
+  port = 8183
+
+  meta {
+    version = "v2"
+  }
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh
new file mode 100644
index 000000000..de5693e29
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/setup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# wait for bootstrap to apply config entries
+wait_for_config_entry proxy-defaults global
+wait_for_config_entry service-resolver s2
+
+# s2 is retained just to have a honeypot for bad envoy configs to route into
+gen_envoy_bootstrap s1 19000
+gen_envoy_bootstrap s2-v1 19001
+gen_envoy_bootstrap s2-v2 19002
+gen_envoy_bootstrap s2 19003
+
+export REQUIRED_SERVICES="
+s1 s1-sidecar-proxy
+s2 s2-sidecar-proxy
+s2-v1 s2-v1-sidecar-proxy
+s2-v2 s2-v2-sidecar-proxy
+"
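Each subset defined on a service-resolver materializes as its own Envoy
cluster, named with the subset as a prefix on the service name; that is why
the assertions below look for endpoints under "v2.s2" (the helpers match on
the "v2.s2.default.dc1.internal." prefix). A quick way to eyeball the
generated cluster names while debugging, assuming s1's admin port of 19000
from this case:

    curl -s localhost:19000/clusters?format=json |
      jq -r '.cluster_statuses[].name'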
diff --git a/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/verify.bats b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/verify.bats
new file mode 100644
index 000000000..066112889
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-defaultsubset/verify.bats
@@ -0,0 +1,48 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s1 proxy admin is up on :19000" {
+  retry_default curl -f -s localhost:19000/stats -o /dev/null
+}
+
+@test "s2-v1 proxy admin is up on :19001" {
+  retry_default curl -f -s localhost:19001/stats -o /dev/null
+}
+
+@test "s2-v2 proxy admin is up on :19002" {
+  retry_default curl -f -s localhost:19002/stats -o /dev/null
+}
+
+@test "s2 proxy admin is up on :19003" {
+  retry_default curl -f -s localhost:19003/stats -o /dev/null
+}
+
+@test "s1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s1
+}
+
+@test "s2-v1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21001 s2
+}
+
+@test "s2-v2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21002 s2
+}
+
+@test "s2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21003 s2
+}
+
+@test "s2 proxies should be healthy" {
+  assert_service_has_healthy_instances s2 3
+}
+
+@test "s1 upstream should have healthy endpoints for v2.s2" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v2.s2 HEALTHY 1
+}
+
+@test "s1 upstream should be able to connect to s2-v2 via upstream s2" {
+  assert_expected_fortio_name s2-v2
+}
+
diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl
new file mode 100644
index 000000000..d7226df6e
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/config_entries.hcl
@@ -0,0 +1,35 @@
+config_entries {
+  bootstrap {
+    kind = "proxy-defaults"
+    name = "global"
+
+    config {
+      protocol = "http"
+    }
+  }
+
+  bootstrap {
+    kind = "service-resolver"
+    name = "s3"
+
+    subsets = {
+      "v1" = {
+        filter = "Service.Meta.version == v1"
+      }
+
+      "v2" = {
+        filter = "Service.Meta.version == v2"
+      }
+    }
+  }
+
+  bootstrap {
+    kind = "service-resolver"
+    name = "s2"
+
+    redirect {
+      service = "s3"
+      service_subset = "v2"
+    }
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v1.hcl b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v1.hcl
new file mode 100644
index 000000000..5b0b95677
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v1.hcl
@@ -0,0 +1,13 @@
+services {
+  id = "s3-v1"
+  name = "s3"
+  port = 8283
+
+  meta {
+    version = "v1"
+  }
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v2.hcl b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v2.hcl
new file mode 100644
index 000000000..9a5590736
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3-v2.hcl
@@ -0,0 +1,13 @@
+services {
+  id = "s3-v2"
+  name = "s3"
+  port = 8284
+
+  meta {
+    version = "v2"
+  }
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3.hcl b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3.hcl
new file mode 100644
index 000000000..f8f7150ea
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/s3.hcl
@@ -0,0 +1,8 @@
+services {
+  name = "s3"
+  port = 8282
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh
new file mode 100644
index 000000000..ca085250c
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# wait for bootstrap to apply config entries
+wait_for_config_entry proxy-defaults global
+wait_for_config_entry service-resolver s2
+wait_for_config_entry service-resolver s3
+
+# s2, s3, and s3-v1 are retained just to have a honeypot for bad envoy configs to route into
+gen_envoy_bootstrap s1 19000
+gen_envoy_bootstrap s2 19001
+gen_envoy_bootstrap s3-v1 19002
+gen_envoy_bootstrap s3-v2 19003
+gen_envoy_bootstrap s3 19004
+
+export REQUIRED_SERVICES="
+s1 s1-sidecar-proxy
+s2 s2-sidecar-proxy
+s3 s3-sidecar-proxy
+s3-v1 s3-v1-sidecar-proxy
+s3-v2 s3-v2-sidecar-proxy
+"
diff --git a/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/verify.bats b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/verify.bats
new file mode 100644
index 000000000..c4423bf7c
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-subset-redirect/verify.bats
@@ -0,0 +1,56 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s1 proxy admin is up on :19000" {
+  retry_default curl -f -s localhost:19000/stats -o /dev/null
+}
+
+@test "s2 proxy admin is up on :19001" {
+  retry_default curl -f -s localhost:19001/stats -o /dev/null
+}
+
+@test "s3-v1 proxy admin is up on :19002" {
+  retry_default curl -f -s localhost:19002/stats -o /dev/null
+}
+
+@test "s3-v2 proxy admin is up on :19003" {
+  retry_default curl -f -s localhost:19003/stats -o /dev/null
+}
+
+@test "s3 proxy admin is up on :19004" {
+  retry_default curl -f -s localhost:19004/stats -o /dev/null
+}
+
+@test "s1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s1
+}
+
+@test "s2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21001 s2
+}
+
+@test "s3-v1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21002 s3
+}
+
+@test "s3-v2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21003 s3
+}
+
+@test "s3 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21004 s3
+}
+
+@test "s3 proxies should be healthy" {
+  assert_service_has_healthy_instances s3 3
+}
+
+@test "s1 upstream should have healthy endpoints for v2.s3" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v2.s3 HEALTHY 1
+}
+
+@test "s1 upstream should be able to connect to s3-v2 via upstream s2" {
+  assert_expected_fortio_name s3-v2
+}
+
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl
new file mode 100644
index 000000000..f67283a45
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/config_entries.hcl
@@ -0,0 +1,39 @@
+enable_central_service_config = true
+
+config_entries {
+  bootstrap {
+    kind = "proxy-defaults"
+    name = "global"
+
+    config {
+      protocol = "http"
+    }
+  }
+
+  bootstrap {
+    kind = "service-resolver"
+    name = "s3"
+
+    subsets = {
+      "v1" = {
+        filter = "Service.Meta.version == v1"
+      }
+
+      "v2" = {
+        filter = "Service.Meta.version == v2"
+      }
+    }
+  }
+
+  bootstrap {
+    kind = "service-resolver"
+    name = "s2"
+
+    failover = {
+      "*" = {
+        service = "s3"
+        service_subset = "v1"
+      }
+    }
+  }
+}
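The "*" key in the failover block above is a catch-all: any subset of s2 that
loses all healthy instances fails over to the v1 subset of s3. To experiment
with the same entry outside the test harness, something like the following
should work, assuming a local dev agent and a resolver.hcl file holding just a
single service-resolver entry:

    consul config write resolver.hcl
    consul config read -kind service-resolver -name s2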
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v1.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v1.hcl
new file mode 100644
index 000000000..5b0b95677
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v1.hcl
@@ -0,0 +1,13 @@
+services {
+  id = "s3-v1"
+  name = "s3"
+  port = 8283
+
+  meta {
+    version = "v1"
+  }
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v2.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v2.hcl
new file mode 100644
index 000000000..9a5590736
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3-v2.hcl
@@ -0,0 +1,13 @@
+services {
+  id = "s3-v2"
+  name = "s3"
+  port = 8284
+
+  meta {
+    version = "v2"
+  }
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3.hcl b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3.hcl
new file mode 100644
index 000000000..f8f7150ea
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/s3.hcl
@@ -0,0 +1,8 @@
+services {
+  name = "s3"
+  port = 8282
+
+  connect {
+    sidecar_service {}
+  }
+}
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh
new file mode 100644
index 000000000..ca085250c
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# wait for bootstrap to apply config entries
+wait_for_config_entry proxy-defaults global
+wait_for_config_entry service-resolver s2
+wait_for_config_entry service-resolver s3
+
+# s2, s3, and s3-v1 are retained just to have a honeypot for bad envoy configs to route into
+gen_envoy_bootstrap s1 19000
+gen_envoy_bootstrap s2 19001
+gen_envoy_bootstrap s3-v1 19002
+gen_envoy_bootstrap s3-v2 19003
+gen_envoy_bootstrap s3 19004
+
+export REQUIRED_SERVICES="
+s1 s1-sidecar-proxy
+s2 s2-sidecar-proxy
+s3 s3-sidecar-proxy
+s3-v1 s3-v1-sidecar-proxy
+s3-v2 s3-v2-sidecar-proxy
+"
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-failover/verify.bats b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/verify.bats
new file mode 100644
index 000000000..d37899c06
--- /dev/null
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-failover/verify.bats
@@ -0,0 +1,75 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s1 proxy admin is up on :19000" {
+  retry_default curl -f -s localhost:19000/stats -o /dev/null
+}
+
+@test "s2 proxy admin is up on :19001" {
+  retry_default curl -f -s localhost:19001/stats -o /dev/null
+}
+
+@test "s3-v1 proxy admin is up on :19002" {
+  retry_default curl -f -s localhost:19002/stats -o /dev/null
+}
+
+@test "s3-v2 proxy admin is up on :19003" {
+  retry_default curl -f -s localhost:19003/stats -o /dev/null
+}
+
+@test "s3 proxy admin is up on :19004" {
+  retry_default curl -f -s localhost:19004/stats -o /dev/null
+}
+
+@test "s1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s1
+}
+
+@test "s2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21001 s2
+}
+
+@test "s3-v1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21002 s3
+}
+
+@test "s3-v2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21003 s3
+}
+
+@test "s3 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21004 s3
+}
+
+@test "s2 proxies should be healthy" {
+  assert_service_has_healthy_instances s2 1
+}
+
+@test "s3 proxies should be healthy" {
+  assert_service_has_healthy_instances s3 3
+}
+
+# Note: when failover is configured, the cluster is named for the original
+# service, not any destination related to failover.
+@test "s1 upstream should have healthy endpoints for s2 and s3 together" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 2
+}
+
+@test "s1 upstream should be able to connect to s2 via upstream s2 to start" {
+  assert_expected_fortio_name s2
+}
+
+@test "terminate instance of s2 envoy which should trigger failover to s3 when tcp check fails" {
+  kill_envoy s2
+}
+
+@test "s1 upstream should have healthy endpoints for s3-v1 and unhealthy endpoints for s2" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 UNHEALTHY 1
+}
+
+@test "s1 upstream should be able to connect to s3-v1 now" {
+  assert_expected_fortio_name s3-v1
+}
+
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/setup.sh
index 03ba66933..1be5cec2a 100644
--- a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/setup.sh
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/setup.sh
@@ -2,9 +2,16 @@
 
 set -euo pipefail
 
-# retry because resolving the central config might race
-retry_default gen_envoy_bootstrap s1 19000
-retry_default gen_envoy_bootstrap s2 19001
-retry_default gen_envoy_bootstrap s3 19002
+# wait for bootstrap to apply config entries
+wait_for_config_entry proxy-defaults global
+wait_for_config_entry service-resolver s2
 
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy s3 s3-sidecar-proxy"
+gen_envoy_bootstrap s1 19000
+gen_envoy_bootstrap s2 19001
+gen_envoy_bootstrap s3 19002
+
+export REQUIRED_SERVICES="
+s1 s1-sidecar-proxy
+s2 s2-sidecar-proxy
+s3 s3-sidecar-proxy
+"
diff --git a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/verify.bats b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/verify.bats
index 260cc7f6a..c132ef809 100644
--- a/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/verify.bats
+++ b/test/integration/connect/envoy/case-cfg-resolver-svc-redirect/verify.bats
@@ -31,7 +31,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s3" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s3 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s3 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to its upstream simply" {
diff --git a/test/integration/connect/envoy/case-dogstatsd-udp/verify.bats b/test/integration/connect/envoy/case-dogstatsd-udp/verify.bats
index c66a497b1..050d71f8f 100644
--- a/test/integration/connect/envoy/case-dogstatsd-udp/verify.bats
+++ b/test/integration/connect/envoy/case-dogstatsd-udp/verify.bats
@@ -15,7 +15,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
diff --git a/test/integration/connect/envoy/case-grpc/verify.bats b/test/integration/connect/envoy/case-grpc/verify.bats
index ffce93c25..a6085b467 100644
--- a/test/integration/connect/envoy/case-grpc/verify.bats
+++ b/test/integration/connect/envoy/case-grpc/verify.bats
@@ -15,7 +15,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 via grpc" {
diff --git a/test/integration/connect/envoy/case-http-badauthz/verify.bats b/test/integration/connect/envoy/case-http-badauthz/verify.bats
index a746a0814..167a6d6f5 100644
--- a/test/integration/connect/envoy/case-http-badauthz/verify.bats
+++ b/test/integration/connect/envoy/case-http-badauthz/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should NOT be able to connect to s2" {
diff --git a/test/integration/connect/envoy/case-http/verify.bats b/test/integration/connect/envoy/case-http/verify.bats
index 00454902b..2a2010a62 100644
--- a/test/integration/connect/envoy/case-http/verify.bats
+++ b/test/integration/connect/envoy/case-http/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 with http/1.1" {
diff --git a/test/integration/connect/envoy/case-http2/verify.bats b/test/integration/connect/envoy/case-http2/verify.bats
index f3ee52b70..f423d2f23 100644
--- a/test/integration/connect/envoy/case-http2/verify.bats
+++ b/test/integration/connect/envoy/case-http2/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 via http2" {
diff --git a/test/integration/connect/envoy/case-prometheus/verify.bats b/test/integration/connect/envoy/case-prometheus/verify.bats
index 182eb439e..8ce9a9803 100644
--- a/test/integration/connect/envoy/case-prometheus/verify.bats
+++ b/test/integration/connect/envoy/case-prometheus/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 with http/1.1" {
diff --git a/test/integration/connect/envoy/case-statsd-udp/verify.bats b/test/integration/connect/envoy/case-statsd-udp/verify.bats
index ee678ad74..51ad6b7a5 100644
--- a/test/integration/connect/envoy/case-statsd-udp/verify.bats
+++ b/test/integration/connect/envoy/case-statsd-udp/verify.bats
@@ -15,7 +15,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
diff --git a/test/integration/connect/envoy/case-zipkin/verify.bats b/test/integration/connect/envoy/case-zipkin/verify.bats
index bfc697cfd..4fa85cd78 100644
--- a/test/integration/connect/envoy/case-zipkin/verify.bats
+++ b/test/integration/connect/envoy/case-zipkin/verify.bats
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
diff --git a/test/integration/connect/envoy/docker-compose.yml b/test/integration/connect/envoy/docker-compose.yml
index 0de581695..d54c29e3f 100644
--- a/test/integration/connect/envoy/docker-compose.yml
+++ b/test/integration/connect/envoy/docker-compose.yml
@@ -74,6 +74,38 @@ services:
       - "disabled"
     network_mode: service:consul
 
+  s2-v1:
+    depends_on:
+      - consul
+    image: "fortio/fortio"
+    environment:
+      - "FORTIO_NAME=s2-v1"
+    command:
+      - "server"
+      - "-http-port"
+      - ":8182"
+      - "-grpc-port"
+      - ":8178"
+      - "-redirect-port"
+      - "disabled"
+    network_mode: service:consul
+
+  s2-v2:
+    depends_on:
+      - consul
+    image: "fortio/fortio"
+    environment:
+      - "FORTIO_NAME=s2-v2"
+    command:
+      - "server"
+      - "-http-port"
+      - ":8183"
+      - "-grpc-port"
+      - ":8177"
+      - "-redirect-port"
+      - "disabled"
+    network_mode: service:consul
+
   s3:
     depends_on:
       - consul
@@ -90,6 +122,38 @@ services:
       - "disabled"
     network_mode: service:consul
 
+  s3-v1:
+    depends_on:
+      - consul
+    image: "fortio/fortio"
+    environment:
+      - "FORTIO_NAME=s3-v1"
+    command:
+      - "server"
+      - "-http-port"
+      - ":8283"
+      - "-grpc-port"
+      - ":8278"
+      - "-redirect-port"
+      - "disabled"
+    network_mode: service:consul
+
+  s3-v2:
+    depends_on:
+      - consul
+    image: "fortio/fortio"
+    environment:
+      - "FORTIO_NAME=s3-v2"
+    command:
+      - "server"
+      - "-http-port"
+      - ":8284"
+      - "-grpc-port"
+      - ":8277"
+      - "-redirect-port"
+      - "disabled"
+    network_mode: service:consul
+
   s1-sidecar-proxy:
     depends_on:
       - consul
@@ -132,6 +196,48 @@ services:
       - *workdir-volume
     network_mode: service:consul
 
+  s2-v1-sidecar-proxy:
+    depends_on:
+      - consul
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/envoy/s2-v1-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul
+
+  s2-v2-sidecar-proxy:
+    depends_on:
+      - consul
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/envoy/s2-v2-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul
+
   s3-sidecar-proxy:
     depends_on:
       - consul
@@ -153,6 +259,48 @@ services:
      - *workdir-volume
     network_mode: service:consul
 
+  s3-v1-sidecar-proxy:
+    depends_on:
+      - consul
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/envoy/s3-v1-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul
+
+  s3-v2-sidecar-proxy:
+    depends_on:
+      - consul
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/envoy/s3-v2-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul
+
   verify:
     depends_on:
       - consul
@@ -168,6 +316,7 @@ services:
     volumes:
       - *workdir-volume
     network_mode: service:consul
+    pid: host
 
   s1-sidecar-proxy-consul-exec:
     depends_on:
diff --git a/test/integration/connect/envoy/helpers.bash b/test/integration/connect/envoy/helpers.bash
index a051228d1..c1c6f368c 100755
--- a/test/integration/connect/envoy/helpers.bash
+++ b/test/integration/connect/envoy/helpers.bash
@@ -28,6 +28,10 @@ function retry_default {
   retry 5 1 $@
 }
 
+function retry_long {
+  retry 30 1 $@
+}
+
 function echored {
   tput setaf 1
   tput bold
@@ -120,34 +124,38 @@ function snapshot_envoy_admin {
   docker_wget "http://${HOSTPORT}/clusters?format=json" -q -O - > "./workdir/envoy/${ENVOY_NAME}-clusters.json"
 }
 
-function get_healthy_upstream_endpoint_count {
+function get_upstream_endpoint_in_status_count {
   local HOSTPORT=$1
   local CLUSTER_NAME=$2
+  local HEALTH_STATUS=$3
   run retry_default curl -s -f "http://${HOSTPORT}/clusters?format=json"
   [ "$status" -eq 0 ]
+  # echo "$output" >&3
   echo "$output" | jq --raw-output "
 .cluster_statuses[]
 | select(.name|startswith(\"${CLUSTER_NAME}.default.dc1.internal.\"))
-| .host_statuses[].health_status
-| select(.eds_health_status == \"HEALTHY\")
+| [.host_statuses[].health_status.eds_health_status]
+| [select(.[] == \"${HEALTH_STATUS}\")]
 | length"
 }
 
-function assert_upstream_has_healthy_endpoints_once {
+function assert_upstream_has_endpoints_in_status_once {
   local HOSTPORT=$1
   local CLUSTER_NAME=$2
-  local EXPECT_COUNT=$3
+  local HEALTH_STATUS=$3
+  local EXPECT_COUNT=$4
 
-  GOT_COUNT=$(get_healthy_upstream_endpoint_count $HOSTPORT $CLUSTER_NAME)
+  GOT_COUNT=$(get_upstream_endpoint_in_status_count $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS)
 
   [ "$GOT_COUNT" -eq $EXPECT_COUNT ]
 }
 
-function assert_upstream_has_healthy_endpoints {
+function assert_upstream_has_endpoints_in_status {
   local HOSTPORT=$1
   local CLUSTER_NAME=$2
-  local EXPECT_COUNT=$3
+  local HEALTH_STATUS=$3
+  local EXPECT_COUNT=$4
 
-  run retry 30 1 assert_upstream_has_healthy_endpoints_once $HOSTPORT $CLUSTER_NAME $EXPECT_COUNT
+  run retry_long assert_upstream_has_endpoints_in_status_once $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS $EXPECT_COUNT
   [ "$status" -eq 0 ]
 }
 
@@ -171,18 +179,37 @@ function assert_service_has_healthy_instances {
   local SERVICE_NAME=$1
   local EXPECT_COUNT=$2
 
-  run retry 30 1 assert_service_has_healthy_instances_once $SERVICE_NAME $EXPECT_COUNT
+  run retry_long assert_service_has_healthy_instances_once $SERVICE_NAME $EXPECT_COUNT
   [ "$status" -eq 0 ]
 }
 
 function docker_consul {
-  docker run -ti --rm --network container:envoy_consul_1 consul-dev $@
+  docker run -i --rm --network container:envoy_consul_1 consul-dev $@
 }
 
 function docker_wget {
   docker run -ti --rm --network container:envoy_consul_1 alpine:3.9 wget $@
 }
 
+function get_envoy_pid {
+  local BOOTSTRAP_NAME=$1
+  run ps aux
+  [ "$status" == 0 ]
+  PID="$(echo "$output" | grep "envoy -c /workdir/envoy/${BOOTSTRAP_NAME}-bootstrap.json" | awk '{print $1}')"
+  [ -n "$PID" ]
+
+  echo "$PID"
+}
+
+function kill_envoy {
+  local BOOTSTRAP_NAME=$1
+
+  PID="$(get_envoy_pid $BOOTSTRAP_NAME)"
+  echo "PID = $PID"
+
+  kill -TERM $PID
+}
+
 function must_match_in_statsd_logs {
   run cat /workdir/statsd/statsd.log
   COUNT=$( echo "$output" | grep -Ec $1 )
@@ -254,6 +281,18 @@ function gen_envoy_bootstrap {
   fi
 }
 
+function read_config_entry {
+  local KIND=$1
+  local NAME=$2
+  docker_consul config read -kind $KIND -name $NAME
+}
+
+function wait_for_config_entry {
+  local KIND=$1
+  local NAME=$2
+  retry_default read_config_entry $KIND $NAME >/dev/null
+}
+
 function get_upstream_fortio_name {
   run retry_default curl -v -s -f localhost:5000/debug?env=dump
   [ "$status" == 0 ]