Expose Envoy's /stats for statsd agents (#7173)

* Expose Envoy /stats for statsd agents; Add testcases

* Remove merge conflict leftover

* Add support for prefix instead of path; Fix docstring to mirror these changes

* Add new config field to docs; Add testcases to check that /stats/prometheus is exposed as well

* Parametrize matchType (prefix or path) and value

* Update website/source/docs/connect/proxies/envoy.md

Co-authored-by: Paul Banks <banks@banksco.de>
Paschalis Tsilias 2020-02-03 19:19:34 +02:00 committed by GitHub
parent 91ffba64e6
commit 1b81cccbf9
8 changed files with 274 additions and 19 deletions

@@ -57,6 +57,14 @@ type BootstrapConfig struct {
// be fixed in a future Consul version as Envoy 1.10 reaches stable release.
PrometheusBindAddr string `mapstructure:"envoy_prometheus_bind_addr"`
// StatsBindAddr configures an <ip>:<port> on which the Envoy will listen
// and expose the /stats HTTP path prefix for any agent to access. It
// does this by proxying that path prefix to the internal admin server
// which allows exposing metrics on the network without the security
// risk of exposing the full admin server API. Any other URL requested will be
// a 404.
StatsBindAddr string `mapstructure:"envoy_stats_bind_addr"`
// OverrideJSONTpl allows replacing the base template used to render the
// bootstrap. This is an "escape hatch" allowing arbitrary control over the
// proxy's configuration but will take the most effort to maintain and correctly
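
For orientation, the new field only re-exposes what Envoy's admin API already serves under /stats; the sketch below (not part of this change) shows what a scraping agent could do against the configured bind address. The 127.0.0.1:9000 address is illustrative, matching the 0.0.0.0:9000 fixture used in the unit tests further down.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Illustrative consumer of the new listener; the port matches the
	// 0.0.0.0:9000 test fixture below, not anything mandated by Consul.
	for _, path := range []string{"/stats", "/stats/prometheus"} {
		resp, err := http.Get("http://127.0.0.1:9000" + path)
		if err != nil {
			fmt.Println("request failed:", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s -> %d (%d bytes)\n", path, resp.StatusCode, len(body))
	}
}

Anything outside the /stats prefix gets the 404 direct response configured on the listener, which is what keeps the rest of the admin API off the network.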
@@ -188,7 +196,13 @@ func (c *BootstrapConfig) ConfigureArgs(args *BootstrapTplArgs) error {
}
// Setup prometheus if needed. This MUST happen after the Static*JSON is set above
if c.PrometheusBindAddr != "" {
if err := c.generatePrometheusConfig(args); err != nil {
if err := c.generateMetricsListenerConfig(args, c.PrometheusBindAddr, "envoy_prometheus_metrics", "path", "/metrics", "/stats/prometheus"); err != nil {
return err
}
}
// Setup /stats proxy listener if needed. This MUST happen after the Static*JSON is set above
if c.StatsBindAddr != "" {
if err := c.generateMetricsListenerConfig(args, c.StatsBindAddr, "envoy_metrics", "prefix", "/stats", "/stats"); err != nil {
return err
}
}
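
The new matchType/matchValue parameters mirror Envoy's two route-match modes: "path" requires an exact match (the /metrics endpoint for Prometheus), while "prefix" matches the value and anything beneath it (so both /stats and /stats/prometheus reach the admin cluster). A small standalone sketch of those semantics, for illustration only and not code from Consul or Envoy:

package main

import (
	"fmt"
	"strings"
)

// matches approximates Envoy's "path" (exact) vs "prefix" route matching;
// illustrative only, not the matching Envoy actually performs.
func matches(matchType, matchValue, requestPath string) bool {
	switch matchType {
	case "path":
		return requestPath == matchValue
	case "prefix":
		return strings.HasPrefix(requestPath, matchValue)
	}
	return false
}

func main() {
	fmt.Println(matches("path", "/metrics", "/metrics"))          // true
	fmt.Println(matches("path", "/metrics", "/metrics/foo"))      // false
	fmt.Println(matches("prefix", "/stats", "/stats/prometheus")) // true
}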
@@ -369,10 +383,10 @@ func (c *BootstrapConfig) generateStatsConfig(args *BootstrapTplArgs) error {
return nil
}
func (c *BootstrapConfig) generatePrometheusConfig(args *BootstrapTplArgs) error {
host, port, err := net.SplitHostPort(c.PrometheusBindAddr)
func (c *BootstrapConfig) generateMetricsListenerConfig(args *BootstrapTplArgs, bindAddr, name, matchType, matchValue, prefixRewrite string) error {
host, port, err := net.SplitHostPort(bindAddr)
if err != nil {
return fmt.Errorf("invalid prometheus_bind_addr: %s", err)
return fmt.Errorf("invalid %s bind address: %s", name, err)
}
clusterJSON := `{
@@ -390,7 +404,7 @@ func (c *BootstrapConfig) generatePrometheusConfig(args *BootstrapTplArgs) error
]
}`
listenerJSON := `{
"name": "envoy_prometheus_metrics_listener",
"name": "` + name + `_listener",
"address": {
"socket_address": {
"address": "` + host + `",
@@ -403,7 +417,7 @@ func (c *BootstrapConfig) generatePrometheusConfig(args *BootstrapTplArgs) error
{
"name": "envoy.http_connection_manager",
"config": {
"stat_prefix": "envoy_prometheus_metrics",
"stat_prefix": "` + name + `",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
@@ -416,11 +430,11 @@ func (c *BootstrapConfig) generatePrometheusConfig(args *BootstrapTplArgs) error
"routes": [
{
"match": {
"path": "/metrics"
"` + matchType + `": "` + matchValue + `"
},
"route": {
"cluster": "self_admin",
"prefix_rewrite": "/stats/prometheus"
"prefix_rewrite": "` + prefixRewrite + `"
}
},
{

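Because the cluster and listener blobs are assembled by string concatenation, a malformed host, name, or rewrite value would otherwise only surface when Envoy rejects the bootstrap. A hypothetical sanity check along these lines can catch that earlier; the abbreviated listenerJSON literal stands in for the full string built above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in for the concatenated listener blob built above; only the
	// fields relevant to the route match are reproduced here.
	listenerJSON := `{
	  "name": "envoy_metrics_listener",
	  "routes": [
	    {"match": {"prefix": "/stats"}, "route": {"prefix_rewrite": "/stats"}}
	  ]
	}`

	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(listenerJSON), &parsed); err != nil {
		fmt.Println("invalid JSON:", err)
		return
	}
	fmt.Println("listener name:", parsed["name"])
}

The unit tests below take the stronger route of comparing the fully rendered cluster and listener JSON against expected fixtures.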
@@ -80,6 +80,77 @@ const (
}
]
}`
expectedStatsListener = `{
"name": "envoy_metrics_listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9000
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.http_connection_manager",
"config": {
"stat_prefix": "envoy_metrics",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
"virtual_hosts": [
{
"name": "self_admin",
"domains": [
"*"
],
"routes": [
{
"match": {
"prefix": "/stats"
},
"route": {
"cluster": "self_admin",
"prefix_rewrite": "/stats"
}
},
{
"match": {
"prefix": "/"
},
"direct_response": {
"status": 404
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.router"
}
]
}
}
]
}
]
}`
expectedStatsCluster = `{
"name": "self_admin",
"connect_timeout": "5s",
"type": "STATIC",
"http_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
]
}`
)
func TestBootstrapConfig_ConfigureArgs(t *testing.T) {
@@ -350,6 +421,48 @@ func TestBootstrapConfig_ConfigureArgs(t *testing.T) {
},
wantErr: false,
},
{
name: "stats-bind-addr",
input: BootstrapConfig{
StatsBindAddr: "0.0.0.0:9000",
},
baseArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
},
wantArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
// Should add a static cluster for the self-proxy to admin
StaticClustersJSON: expectedStatsCluster,
// Should add a static http listener too
StaticListenersJSON: expectedStatsListener,
StatsConfigJSON: defaultStatsConfigJSON,
},
wantErr: false,
},
{
name: "stats-bind-addr-with-overrides",
input: BootstrapConfig{
StatsBindAddr: "0.0.0.0:9000",
StaticClustersJSON: `{"foo":"bar"}`,
StaticListenersJSON: `{"baz":"qux"}`,
},
baseArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
},
wantArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
// Should add a static cluster for the self-proxy to admin
StaticClustersJSON: `{"foo":"bar"},` + expectedStatsCluster,
// Should add a static http listener too
StaticListenersJSON: `{"baz":"qux"},` + expectedStatsListener,
StatsConfigJSON: defaultStatsConfigJSON,
},
wantErr: false,
},
{
name: "stats-flush-interval",
input: BootstrapConfig{
@@ -379,6 +492,13 @@ func TestBootstrapConfig_ConfigureArgs(t *testing.T) {
},
wantErr: true,
},
{
name: "err-bad-stats-addr",
input: BootstrapConfig{
StatsBindAddr: "asdasdsad",
},
wantErr: true,
},
{
name: "err-bad-statsd-addr",
input: BootstrapConfig{

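The loop that drives these cases sits outside the hunks shown here; the fragment below is only a sketch of the usual table-driven shape, reusing the field names visible above (name, input, baseArgs, wantArgs, wantErr) and assuming the testify require package commonly used in Consul's tests.

// Sketch only: the real driving loop is not part of this diff and may differ.
for _, tc := range tests {
	t.Run(tc.name, func(t *testing.T) {
		args := tc.baseArgs
		err := tc.input.ConfigureArgs(&args)
		if tc.wantErr {
			require.Error(t, err)
			return
		}
		require.NoError(t, err)
		require.Equal(t, tc.wantArgs, args)
	})
}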
@@ -0,0 +1,23 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
envoy_stats_bind_addr = "0.0.0.0:1239"
}
}
}
}
}

@@ -0,0 +1,13 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
}
}
}
}

@@ -0,0 +1,6 @@
#!/bin/bash
set -eEuo pipefail
gen_envoy_bootstrap s1 19000 primary
gen_envoy_bootstrap s2 19001 primary

@@ -0,0 +1,62 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s2 proxy should be healthy" {
assert_service_has_healthy_instances s2 1
}
@test "s1 upstream should have healthy endpoints for s2" {
# protocol is configured in an upstream override so the cluster name is customized here
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 1a47f6e1~s2.default.primary HEALTHY 1
}
@test "s1 upstream should be able to connect to s2 with http/1.1" {
run retry_default curl --http1.1 -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}
@test "s1 proxy should be exposing the /stats prefix" {
# Should have http metrics. This is just a sample one. Require the metric to
# be present not just found in a comment (anchor the regexp).
retry_default \
must_match_in_stats_proxy_response localhost:1239 \
'stats' '^http.envoy_metrics.downstream_rq_active'
# Response should include the local cluster request.
retry_default \
must_match_in_stats_proxy_response localhost:1239 \
'stats' 'cluster.local_agent.upstream_rq_active'
# Response should include the http public listener.
retry_default \
must_match_in_stats_proxy_response localhost:1239 \
'stats' 'http.public_listener_http'
# /stats/prometheus should also be reachable and labelling the local cluster.
retry_default \
must_match_in_stats_proxy_response localhost:1239 \
'stats/prometheus' '[\{,]local_cluster="s1"[,}]'
# /stats/prometheus should also be reachable and exposing metrics.
retry_default \
must_match_in_stats_proxy_response localhost:1239 \
'stats/prometheus' 'envoy_http_downstream_rq_active'
}

@@ -324,7 +324,7 @@ function get_healthy_service_count {
local SERVICE_NAME=$1
local DC=$2
local NS=$3
run retry_default curl -s -f ${HEADERS} "127.0.0.1:8500/v1/health/connect/${SERVICE_NAME}?dc=${DC}&passing&ns=${NS}"
[ "$status" -eq 0 ]
echo "$output" | jq --raw-output '. | length'
@@ -354,21 +354,21 @@ function assert_service_has_healthy_instances {
function check_intention {
local SOURCE=$1
local DESTINATION=$2
curl -s -f "localhost:8500/v1/connect/intentions/check?source=${SOURCE}&destination=${DESTINATION}" | jq ".Allowed"
}
function assert_intention_allowed {
local SOURCE=$1
local DESTINATION=$2
[ "$(check_intention "${SOURCE}" "${DESTINATION}")" == "true" ]
}
function assert_intention_denied {
local SOURCE=$1
local DESTINATION=$2
[ "$(check_intention "${SOURCE}" "${DESTINATION}")" == "false" ]
}
@@ -451,6 +451,18 @@ function must_match_in_prometheus_response {
[ "$COUNT" -gt "0" ]
}
function must_match_in_stats_proxy_response {
run curl -f -s $1/$2
COUNT=$( echo "$output" | grep -Ec $3 )
echo "OUTPUT head -n 10"
echo "$output" | head -n 10
echo "COUNT of '$3' matches: $COUNT"
[ "$status" == 0 ]
[ "$COUNT" -gt "0" ]
}
# must_fail_tcp_connection checks that a request made through an upstream fails,
# probably due to authz being denied if all other tests passed already. Although
# we are using curl, this only works as expected for TCP upstreams as we are
@@ -542,12 +554,12 @@ function get_intention_target_namespace {
function get_intention_by_targets {
local SOURCE=$1
local DESTINATION=$2
local SOURCE_NS=$(get_intention_target_namespace <<< "${SOURCE}")
local SOURCE_NAME=$(get_intention_target_name <<< "${SOURCE}")
local DESTINATION_NS=$(get_intention_target_namespace <<< "${DESTINATION}")
local DESTINATION_NAME=$(get_intention_target_name <<< "${DESTINATION}")
existing=$(list_intentions | jq ".[] | select(.SourceNS == \"$SOURCE_NS\" and .SourceName == \"$SOURCE_NAME\" and .DestinationNS == \"$DESTINATION_NS\" and .DestinationName == \"$DESTINATION_NAME\")")
if test -z "$existing"
then
@@ -561,16 +573,16 @@ function update_intention {
local SOURCE=$1
local DESTINATION=$2
local ACTION=$3
intention=$(get_intention_by_targets "${SOURCE}" "${DESTINATION}")
if test $? -ne 0
then
return 1
fi
id=$(jq -r .ID <<< "${intention}")
updated=$(jq ".Action = \"$ACTION\"" <<< "${intention}")
curl -s -X PUT "http://localhost:8500/v1/connect/intentions/${id}" -d "${updated}"
return $?
}

@@ -138,6 +138,11 @@ configuration entry](/docs/agent/config-entries/proxy-defaults.html). The env va
-> **Note:** Envoy versions prior to 1.10 do not export timing histograms
using the internal Prometheus endpoint.
- `envoy_stats_bind_addr` - Specifies that the proxy should expose the /stats prefix
to the _public_ network. It must be supplied in the form `ip:port` and the
ip/port combination must be free within the network namespace the proxy runs in.
Typically the IP would be `0.0.0.0` to bind to all available interfaces, or a pod IP
address. (An address-validation sketch follows this list.)
- `envoy_stats_tags` - Specifies one or more static tags that will be added to
all metrics produced by the proxy.
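
As referenced in the `envoy_stats_bind_addr` entry above, the `ip:port` requirement maps directly onto the net.SplitHostPort validation in the bootstrap code; a small illustrative check, where the bad value mirrors the "err-bad-stats-addr" unit test case:

package main

import (
	"fmt"
	"net"
)

func main() {
	// "0.0.0.0:9000" is a well-formed bind address; "asdasdsad" mirrors the
	// malformed value rejected in the err-bad-stats-addr test case.
	for _, addr := range []string{"0.0.0.0:9000", "asdasdsad"} {
		host, port, err := net.SplitHostPort(addr)
		if err != nil {
			fmt.Printf("%q rejected: %v\n", addr, err)
			continue
		}
		fmt.Printf("%q accepted: host=%s port=%s\n", addr, host, port)
	}
}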
@@ -170,7 +175,7 @@ and `proxy.upstreams[*].config` fields of the [proxy service
definition](/docs/connect/registration/service-registration.html) that is
actually registered.
To learn about other options that can be configured centrally see the
[Configuration Entries](/docs/agent/config_entries.html) docs.
### Proxy Config Options