tests: adding new envoy integration tests for L7 service-resolvers (#6129)
Additionally:

- wait for bootstrap config entries to be applied
- run the verify container in the host's PID namespace so we can kill envoys without mounting the docker socket
- assert that we actually send HEALTHY and UNHEALTHY endpoints down in EDS during failover
Parent: 5b6fa58453
Commit: e060748d3f
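The heart of the new checks is a bats helper that counts EDS endpoints of a given health status for an upstream cluster via the Envoy admin API (added to helpers.bash below). A minimal sketch of how the failover test uses it; the helper names and admin address match the diff, while the test name here is illustrative:

#!/usr/bin/env bats

load helpers

# After the s2 envoy is killed, the s2 cluster should report the failover
# target as HEALTHY and the original s2 instance as UNHEALTHY in EDS.
@test "failover is reflected in EDS health status" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 UNHEALTHY 1
}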
@@ -8,4 +8,4 @@ docker_consul intention create -deny s1 s2
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap s2 19001

export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should NOT be able to connect to s2" {

@@ -27,7 +27,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2" {

@@ -2,8 +2,12 @@

set -euo pipefail

# retry because resolving the central config might race
retry_default gen_envoy_bootstrap s1 19000
retry_default gen_envoy_bootstrap s2 19001
# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-defaults s1
wait_for_config_entry service-defaults s2

export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap s2 19001

export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2 with http/1.1" {

@@ -0,0 +1,28 @@
enable_central_service_config = true

config_entries {
  bootstrap {
    kind = "proxy-defaults"
    name = "global"

    config {
      protocol = "http"
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s2"
    default_subset = "v2"

    subsets = {
      "v1" = {
        filter = "Service.Meta.version == v1"
      }

      "v2" = {
        filter = "Service.Meta.version == v2"
      }
    }
  }
}

@@ -0,0 +1,13 @@
services {
  id = "s2-v1"
  name = "s2"
  port = 8182

  meta {
    version = "v1"
  }

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,13 @@
services {
  id = "s2-v2"
  name = "s2"
  port = 8183

  meta {
    version = "v2"
  }

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,20 @@
#!/bin/bash

set -euo pipefail

# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2

# s2 is retained just to have a honeypot for bad envoy configs to route into
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap s2-v1 19001
gen_envoy_bootstrap s2-v2 19002
gen_envoy_bootstrap s2 19003

export REQUIRED_SERVICES="
s1 s1-sidecar-proxy
s2 s2-sidecar-proxy
s2-v1 s2-v1-sidecar-proxy
s2-v2 s2-v2-sidecar-proxy
"

@@ -0,0 +1,48 @@
#!/usr/bin/env bats

load helpers

@test "s1 proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "s2-v1 proxy admin is up on :19001" {
  retry_default curl -f -s localhost:19001/stats -o /dev/null
}

@test "s2-v2 proxy admin is up on :19002" {
  retry_default curl -f -s localhost:19002/stats -o /dev/null
}

@test "s2 proxy admin is up on :19003" {
  retry_default curl -f -s localhost:19003/stats -o /dev/null
}

@test "s1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21000 s1
}

@test "s2-v1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21001 s2
}

@test "s2-v2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21002 s2
}

@test "s2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21003 s2
}

@test "s2 proxies should be healthy" {
  assert_service_has_healthy_instances s2 3
}

@test "s1 upstream should have healthy endpoints for v2.s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v2.s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2-v2 via upstream s2" {
  assert_expected_fortio_name s2-v2
}

@@ -0,0 +1,35 @@
config_entries {
  bootstrap {
    kind = "proxy-defaults"
    name = "global"

    config {
      protocol = "http"
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s3"

    subsets = {
      "v1" = {
        filter = "Service.Meta.version == v1"
      }

      "v2" = {
        filter = "Service.Meta.version == v2"
      }
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s2"

    redirect {
      service = "s3"
      service_subset = "v2"
    }
  }
}

@@ -0,0 +1,13 @@
services {
  id = "s3-v1"
  name = "s3"
  port = 8283

  meta {
    version = "v1"
  }

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,13 @@
services {
  id = "s3-v2"
  name = "s3"
  port = 8284

  meta {
    version = "v2"
  }

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,8 @@
services {
  name = "s3"
  port = 8282

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,23 @@
#!/bin/bash

set -euo pipefail

# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2
wait_for_config_entry service-resolver s3

# s2, s3, and s3-v1 are retained just to have a honeypot for bad envoy configs to route into
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap s2 19001
gen_envoy_bootstrap s3-v1 19002
gen_envoy_bootstrap s3-v2 19003
gen_envoy_bootstrap s3 19004

export REQUIRED_SERVICES="
s1 s1-sidecar-proxy
s2 s2-sidecar-proxy
s3 s3-sidecar-proxy
s3-v1 s3-v1-sidecar-proxy
s3-v2 s3-v2-sidecar-proxy
"

@@ -0,0 +1,56 @@
#!/usr/bin/env bats

load helpers

@test "s1 proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "s2 proxy admin is up on :19001" {
  retry_default curl -f -s localhost:19001/stats -o /dev/null
}

@test "s3-v1 proxy admin is up on :19002" {
  retry_default curl -f -s localhost:19002/stats -o /dev/null
}

@test "s3-v2 proxy admin is up on :19003" {
  retry_default curl -f -s localhost:19003/stats -o /dev/null
}

@test "s3 proxy admin is up on :19004" {
  retry_default curl -f -s localhost:19004/stats -o /dev/null
}

@test "s1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21000 s1
}

@test "s2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21001 s2
}

@test "s3-v1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21002 s3
}

@test "s3-v2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21002 s3
}

@test "s3 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21003 s3
}

@test "s3 proxies should be healthy" {
  assert_service_has_healthy_instances s3 3
}

@test "s1 upstream should have healthy endpoints for v2.s3" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v2.s3 HEALTHY 1
}

@test "s1 upstream should be able to connect to s3-v2 via upstream s2" {
  assert_expected_fortio_name s3-v2
}

@@ -0,0 +1,39 @@
enable_central_service_config = true

config_entries {
  bootstrap {
    kind = "proxy-defaults"
    name = "global"

    config {
      protocol = "http"
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s3"

    subsets = {
      "v1" = {
        filter = "Service.Meta.version == v1"
      }

      "v2" = {
        filter = "Service.Meta.version == v2"
      }
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s2"

    failover = {
      "*" = {
        service = "s3"
        service_subset = "v1"
      }
    }
  }
}

@@ -0,0 +1,13 @@
services {
  id = "s3-v1"
  name = "s3"
  port = 8283

  meta {
    version = "v1"
  }

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,13 @@
services {
  id = "s3-v2"
  name = "s3"
  port = 8284

  meta {
    version = "v2"
  }

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,8 @@
services {
  name = "s3"
  port = 8282

  connect {
    sidecar_service {}
  }
}

@@ -0,0 +1,23 @@
#!/bin/bash

set -euo pipefail

# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2
wait_for_config_entry service-resolver s3

# s2, s3, and s3-v1 are retained just to have a honeypot for bad envoy configs to route into
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap s2 19001
gen_envoy_bootstrap s3-v1 19002
gen_envoy_bootstrap s3-v2 19003
gen_envoy_bootstrap s3 19004

export REQUIRED_SERVICES="
s1 s1-sidecar-proxy
s2 s2-sidecar-proxy
s3 s3-sidecar-proxy
s3-v1 s3-v1-sidecar-proxy
s3-v2 s3-v2-sidecar-proxy
"

@@ -0,0 +1,75 @@
#!/usr/bin/env bats

load helpers

@test "s1 proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "s2 proxy admin is up on :19001" {
  retry_default curl -f -s localhost:19001/stats -o /dev/null
}

@test "s3-v1 proxy admin is up on :19002" {
  retry_default curl -f -s localhost:19002/stats -o /dev/null
}

@test "s3-v2 proxy admin is up on :19003" {
  retry_default curl -f -s localhost:19003/stats -o /dev/null
}

@test "s3 proxy admin is up on :19004" {
  retry_default curl -f -s localhost:19004/stats -o /dev/null
}

@test "s1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21000 s1
}

@test "s2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21001 s2
}

@test "s3-v1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21002 s3
}

@test "s3-v2 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21003 s3
}

@test "s3 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21004 s3
}

@test "s2 proxies should be healthy" {
  assert_service_has_healthy_instances s2 1
}

@test "s3 proxies should be healthy" {
  assert_service_has_healthy_instances s3 3
}

# Note: when failover is configured the cluster is named for the original
# service not any destination related to failover.
@test "s1 upstream should have healthy endpoints for s2 and s3 together" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 2
}

@test "s1 upstream should be able to connect to s2 via upstream s2 to start" {
  assert_expected_fortio_name s2
}

@test "terminate instance of s2 envoy which should trigger failover to s3 when tcp check fails" {
  kill_envoy s2
}

@test "s1 upstream should have healthy endpoints for s3-v1 and unhealthy endpoints for s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 UNHEALTHY 1
}

@test "s1 upstream should be able to connect to s3-v1 now" {
  assert_expected_fortio_name s3-v1
}

@@ -2,9 +2,16 @@

set -euo pipefail

# retry because resolving the central config might race
retry_default gen_envoy_bootstrap s1 19000
retry_default gen_envoy_bootstrap s2 19001
retry_default gen_envoy_bootstrap s3 19002
# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2

export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy s3 s3-sidecar-proxy"
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap s2 19001
gen_envoy_bootstrap s3 19002

export REQUIRED_SERVICES="
s1 s1-sidecar-proxy
s2 s2-sidecar-proxy
s3 s3-sidecar-proxy
"

@@ -31,7 +31,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s3" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s3 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s3 HEALTHY 1
}

@test "s1 upstream should be able to connect to its upstream simply" {

@@ -15,7 +15,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2" {

@@ -15,7 +15,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2 via grpc" {

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should NOT be able to connect to s2" {

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2 with http/1.1" {

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2 via http2" {

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2 with http/1.1" {

@@ -15,7 +15,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2" {

@@ -23,7 +23,7 @@ load helpers
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_healthy_endpoints 127.0.0.1:19000 s2 1
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2" {

@@ -74,6 +74,38 @@ services:
      - "disabled"
    network_mode: service:consul

  s2-v1:
    depends_on:
      - consul
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s2-v1"
    command:
      - "server"
      - "-http-port"
      - ":8182"
      - "-grpc-port"
      - ":8178"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul

  s2-v2:
    depends_on:
      - consul
    image: "fortio/fortio"
    environment:
      - "FORTIO_NAME=s2-v2"
    command:
      - "server"
      - "-http-port"
      - ":8183"
      - "-grpc-port"
      - ":8177"
      - "-redirect-port"
      - "disabled"
    network_mode: service:consul

  s3:
    depends_on:
      - consul

|
|||
- "disabled"
|
||||
network_mode: service:consul
|
||||
|
||||
s3-v1:
|
||||
depends_on:
|
||||
- consul
|
||||
image: "fortio/fortio"
|
||||
environment:
|
||||
- "FORTIO_NAME=s3-v1"
|
||||
command:
|
||||
- "server"
|
||||
- "-http-port"
|
||||
- ":8283"
|
||||
- "-grpc-port"
|
||||
- ":8278"
|
||||
- "-redirect-port"
|
||||
- "disabled"
|
||||
network_mode: service:consul
|
||||
|
||||
s3-v2:
|
||||
depends_on:
|
||||
- consul
|
||||
image: "fortio/fortio"
|
||||
environment:
|
||||
- "FORTIO_NAME=s3-v2"
|
||||
command:
|
||||
- "server"
|
||||
- "-http-port"
|
||||
- ":8284"
|
||||
- "-grpc-port"
|
||||
- ":8277"
|
||||
- "-redirect-port"
|
||||
- "disabled"
|
||||
network_mode: service:consul
|
||||
|
||||
s1-sidecar-proxy:
|
||||
depends_on:
|
||||
- consul
|
||||
|
@@ -132,6 +196,48 @@ services:
      - *workdir-volume
    network_mode: service:consul

  s2-v1-sidecar-proxy:
    depends_on:
      - consul
    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/envoy/s2-v1-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since both envoys seem to interact with each other
      # despite separate containers that don't share IPC namespace. Not quite
      # sure how this happens but may be due to unix socket being in some shared
      # location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul

  s2-v2-sidecar-proxy:
    depends_on:
      - consul
    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/envoy/s2-v2-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since both envoys seem to interact with each other
      # despite separate containers that don't share IPC namespace. Not quite
      # sure how this happens but may be due to unix socket being in some shared
      # location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul

  s3-sidecar-proxy:
    depends_on:
      - consul

@@ -153,6 +259,48 @@ services:
      - *workdir-volume
    network_mode: service:consul

  s3-v1-sidecar-proxy:
    depends_on:
      - consul
    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/envoy/s3-v1-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since both envoys seem to interact with each other
      # despite separate containers that don't share IPC namespace. Not quite
      # sure how this happens but may be due to unix socket being in some shared
      # location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul

  s3-v2-sidecar-proxy:
    depends_on:
      - consul
    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/envoy/s3-v2-bootstrap.json"
      - "-l"
      - "debug"
      # Hot restart breaks since both envoys seem to interact with each other
      # despite separate containers that don't share IPC namespace. Not quite
      # sure how this happens but may be due to unix socket being in some shared
      # location?
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul

  verify:
    depends_on:
      - consul

@@ -168,6 +316,7 @@ services:
    volumes:
      - *workdir-volume
    network_mode: service:consul
    pid: host

  s1-sidecar-proxy-consul-exec:
    depends_on:

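For context on the pid: host line above, a rough sketch of what it enables (the bootstrap path and service name follow the get_envoy_pid/kill_envoy helpers below; this assumes a busybox-style ps where the PID is the first column): the verify container can see and signal envoy processes started by the sidecar containers directly, so the docker socket never has to be mounted.

# Runs inside the verify container; only works because it shares the
# host's PID namespace (pid: host). The s2 bootstrap path is an example.
PID="$(ps aux | grep 'envoy -c /workdir/envoy/s2-bootstrap.json' | awk '{print $1}')"
kill -TERM "$PID"
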
@@ -28,6 +28,10 @@ function retry_default {
  retry 5 1 $@
}

function retry_long {
  retry 30 1 $@
}

function echored {
  tput setaf 1
  tput bold

@@ -120,34 +124,38 @@ function snapshot_envoy_admin {
  docker_wget "http://${HOSTPORT}/clusters?format=json" -q -O - > "./workdir/envoy/${ENVOY_NAME}-clusters.json"
}

function get_healthy_upstream_endpoint_count {
function get_upstream_endpoint_in_status_count {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  local HEALTH_STATUS=$3
  run retry_default curl -s -f "http://${HOSTPORT}/clusters?format=json"
  [ "$status" -eq 0 ]
  # echo "$output" >&3
  echo "$output" | jq --raw-output "
    .cluster_statuses[]
    | select(.name|startswith(\"${CLUSTER_NAME}.default.dc1.internal.\"))
    | .host_statuses[].health_status
    | select(.eds_health_status == \"HEALTHY\")
    | [.host_statuses[].health_status.eds_health_status]
    | [select(.[] == \"${HEALTH_STATUS}\")]
    | length"
}

function assert_upstream_has_healthy_endpoints_once {
function assert_upstream_has_endpoints_in_status_once {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  local EXPECT_COUNT=$3
  local HEALTH_STATUS=$3
  local EXPECT_COUNT=$4

  GOT_COUNT=$(get_healthy_upstream_endpoint_count $HOSTPORT $CLUSTER_NAME)
  GOT_COUNT=$(get_upstream_endpoint_in_status_count $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS)

  [ "$GOT_COUNT" -eq $EXPECT_COUNT ]
}

function assert_upstream_has_healthy_endpoints {
function assert_upstream_has_endpoints_in_status {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  local EXPECT_COUNT=$3
  run retry 30 1 assert_upstream_has_healthy_endpoints_once $HOSTPORT $CLUSTER_NAME $EXPECT_COUNT
  local HEALTH_STATUS=$3
  local EXPECT_COUNT=$4
  run retry_long assert_upstream_has_endpoints_in_status_once $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS $EXPECT_COUNT
  [ "$status" -eq 0 ]
}

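To make the jq pipeline above concrete, here is a hand-run sketch of the same query (the admin address and cluster name are examples; the field names are exactly the ones the helper selects on):

# Print the EDS health status of every endpoint Envoy knows for the s2 cluster.
curl -s 'http://127.0.0.1:19000/clusters?format=json' | jq -r '
  .cluster_statuses[]
  | select(.name|startswith("s2.default.dc1.internal."))
  | [.host_statuses[].health_status.eds_health_status]'
# In the failover case this prints something like ["UNHEALTHY","HEALTHY"]
# once the s2 envoy has been killed.
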
@@ -171,18 +179,37 @@ function assert_service_has_healthy_instances {
  local SERVICE_NAME=$1
  local EXPECT_COUNT=$2

  run retry 30 1 assert_service_has_healthy_instances_once $SERVICE_NAME $EXPECT_COUNT
  run retry_long assert_service_has_healthy_instances_once $SERVICE_NAME $EXPECT_COUNT
  [ "$status" -eq 0 ]
}

function docker_consul {
  docker run -ti --rm --network container:envoy_consul_1 consul-dev $@
  docker run -i --rm --network container:envoy_consul_1 consul-dev $@
}

function docker_wget {
  docker run -ti --rm --network container:envoy_consul_1 alpine:3.9 wget $@
}

function get_envoy_pid {
  local BOOTSTRAP_NAME=$1
  run ps aux
  [ "$status" == 0 ]
  PID="$(echo "$output" | grep "envoy -c /workdir/envoy/${BOOTSTRAP_NAME}-bootstrap.json" | awk '{print $1}')"
  [ -n "$PID" ]

  echo "$PID"
}

function kill_envoy {
  local BOOTSTRAP_NAME=$1

  PID="$(get_envoy_pid $BOOTSTRAP_NAME)"
  echo "PID = $PID"

  kill -TERM $PID
}

function must_match_in_statsd_logs {
  run cat /workdir/statsd/statsd.log
  COUNT=$( echo "$output" | grep -Ec $1 )

@@ -254,6 +281,18 @@ function gen_envoy_bootstrap {
  fi
}

function read_config_entry {
  local KIND=$1
  local NAME=$2
  docker_consul config read -kind $KIND -name $NAME
}

function wait_for_config_entry {
  local KIND=$1
  local NAME=$2
  retry_default read_config_entry $KIND $NAME >/dev/null
}

function get_upstream_fortio_name {
  run retry_default curl -v -s -f localhost:5000/debug?env=dump
  [ "$status" == 0 ]
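
A short usage note on the two helpers above: wait_for_config_entry simply retries read_config_entry until the entry is readable, so the same helper can be used interactively when debugging a setup script (the kind and name here match the failover case):

# Block until the bootstrap config entry has been applied, then dump it.
wait_for_config_entry service-resolver s2
docker_consul config read -kind service-resolver -name s2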