Connect: allow configuring Envoy for L7 Observability (#5558)

* Add support for HTTP proxy listeners

* Add customizable bootstrap configuration options

* Debug logging for xDS AuthZ

* Add Envoy Integration test suite with basic test coverage

* Add envoy command tests to cover new cases

* Add tracing integration test

* Add gRPC support WIP

* Merged Docker changes from master; CI integration now works with the same Dockerfile

* Make docker build optional for integration

* Enable integration tests again!

* http2 and grpc integration tests and fixes

* Fix up command config tests

* Store all container logs as artifacts in circle on fail

* Add retries to outer part of stats measurements as we keep missing them in CI

* Only dump logs on failing cases

* Fix typos from code review

* Review tidying and make tests pass again

* Add debug logs to exec test.

* Fix legit test failure caused by upstream rename in envoy config

* Attempt to reduce cases of bad TLS handshake in CI integration tests

* Bring up the right service

* Add prometheus integration test

* Add test for denied AuthZ both HTTP and TCP

* Try ANSI term for Circle
Paul Banks committed 2019-04-29 17:27:57 +01:00
commit d6c0557e86 (parent e0778eeee9)
388 changed files with 46614 additions and 8519 deletions


@ -313,6 +313,41 @@ jobs:
- store_test_results:
path: ui-v2/test-results
# Envoy integration tests. These require the docker dev binary to be built already
envoy-integration-test-1.8.0:
docker:
# We only really need bash and docker-compose, which are installed on all
# Circle images, but we have to pick one of them so use the Go image.
- image: *GOLANG_IMAGE
environment:
ENVOY_VERSIONS: "1.8.0"
steps:
&ENVOY_INTEGRATION_TEST_STEPS
- checkout
# Get go binary from workspace
- attach_workspace:
at: .
- setup_remote_docker:
docker_layer_caching: true
# Build the consul-dev image from the already built binary
- run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
- run:
name: Envoy Integration Tests
command: make test-envoy-integ SKIP_DOCKER_BUILD=1
environment:
# tput complains if this isn't set to something.
TERM: ansi
- store_artifacts:
path: ./test/integration/connect/envoy/workdir/logs
destination: container-logs
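# The 1.9.1 job reuses the exact same steps as the 1.8.0 job above via the
# ENVOY_INTEGRATION_TEST_STEPS YAML anchor; only ENVOY_VERSIONS differs.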
envoy-integration-test-1.9.1:
docker:
- image: *GOLANG_IMAGE
environment:
ENVOY_VERSIONS: "1.9.1"
steps: *ENVOY_INTEGRATION_TEST_STEPS
workflows:
version: 2
build-distros:
@ -339,6 +374,12 @@ workflows:
- nomad-integration-0_8:
requires:
- dev-build
- envoy-integration-test-1.8.0:
requires:
- dev-build
- envoy-integration-test-1.9.1:
requires:
- dev-build
website:
jobs:
- build-website


@ -96,6 +96,12 @@ export GIT_DESCRIBE
export GOTAGS
export GOLDFLAGS
# Allow skipping docker build during integration tests in CI since we already
# have a built binary
ENVOY_INTEG_DEPS?=dev-docker
ifdef SKIP_DOCKER_BUILD
ENVOY_INTEG_DEPS=noop
endif
DEV_PUSH?=0
ifeq ($(DEV_PUSH),1)
@ -107,6 +113,9 @@ endif
# all builds binaries for all targets
all: bin
# used to make integration dependencies conditional
noop: ;
bin: tools
@$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh
@ -269,9 +278,12 @@ consul-docker: go-build-image
ui-docker: ui-build-image
@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui
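# Run the Connect/Envoy integration test suite. Depends on dev-docker by
# default; CI passes SKIP_DOCKER_BUILD=1 (see ENVOY_INTEG_DEPS above) because
# the consul-dev image is already built from the workspace binary.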
test-envoy-integ: $(ENVOY_INTEG_DEPS)
@$(SHELL) $(CURDIR)/test/integration/connect/envoy/run-tests.sh
proto:
protoc agent/connect/ca/plugin/*.proto --gofast_out=plugins=grpc:../../..
.PHONY: all ci bin dev dist cov test test-ci test-internal test-install-deps cover format vet ui static-assets tools
.PHONY: docker-images go-build-image ui-build-image static-assets-docker consul-docker ui-docker
.PHONY: version proto
.PHONY: version proto test-envoy-integ


@ -113,7 +113,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
Roots: roots,
Leaf: leaf,
UpstreamEndpoints: map[string]structs.CheckServiceNodes{
"service:db": TestUpstreamNodes(t),
"db": TestUpstreamNodes(t),
},
}
start := time.Now()


@ -170,7 +170,7 @@ func (s *state) initWatches() error {
QueryOptions: structs.QueryOptions{Token: s.token, MaxAge: defaultPreparedQueryPollInterval},
QueryIDOrName: u.DestinationName,
Connect: true,
}, u.Identifier(), s.ch)
}, "upstream:"+u.Identifier(), s.ch)
case structs.UpstreamDestTypeService:
fallthrough
case "": // Treat unset as the default Service type
@ -179,7 +179,10 @@ func (s *state) initWatches() error {
QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: u.DestinationName,
Connect: true,
}, u.Identifier(), s.ch)
// Note that Identifier no longer type-prefixes services as that's the
// default and it makes metrics and other things much cleaner. Here it's
// simpler for us to keep the type prefix so the correlation ID stays unambiguous.
}, "upstream:"+serviceIDPrefix+u.Identifier(), s.ch)
if err != nil {
return err
@ -297,19 +300,21 @@ func (s *state) handleUpdate(u cache.UpdateEvent, snap *ConfigSnapshot) error {
default:
// Service discovery result, figure out which type
switch {
case strings.HasPrefix(u.CorrelationID, serviceIDPrefix):
case strings.HasPrefix(u.CorrelationID, "upstream:"+serviceIDPrefix):
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
if !ok {
return fmt.Errorf("invalid type for service response: %T", u.Result)
}
snap.UpstreamEndpoints[u.CorrelationID] = resp.Nodes
svc := strings.TrimPrefix(u.CorrelationID, "upstream:"+serviceIDPrefix)
snap.UpstreamEndpoints[svc] = resp.Nodes
case strings.HasPrefix(u.CorrelationID, preparedQueryIDPrefix):
case strings.HasPrefix(u.CorrelationID, "upstream:"+preparedQueryIDPrefix):
resp, ok := u.Result.(*structs.PreparedQueryExecuteResponse)
if !ok {
return fmt.Errorf("invalid type for prepared query response: %T", u.Result)
}
snap.UpstreamEndpoints[u.CorrelationID] = resp.Nodes
pq := strings.TrimPrefix(u.CorrelationID, "upstream:")
snap.UpstreamEndpoints[pq] = resp.Nodes
default:
return errors.New("unknown correlation ID")
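
To make the new correlation-ID scheme concrete, here is a minimal sketch (illustrative only, not part of this change) of how upstream discovery results end up keyed in UpstreamEndpoints, assuming serviceIDPrefix is "service:" and preparedQueryIDPrefix is "prepared_query:" (both are defined outside this hunk):

package main

import (
	"fmt"
	"strings"
)

// upstreamKey mirrors the prefix handling above: service upstreams drop both
// the "upstream:" and the assumed "service:" prefixes so the key matches the
// bare service name, while prepared queries keep their type prefix.
func upstreamKey(correlationID string) string {
	const serviceIDPrefix = "service:" // assumed value of the real constant
	if strings.HasPrefix(correlationID, "upstream:"+serviceIDPrefix) {
		return strings.TrimPrefix(correlationID, "upstream:"+serviceIDPrefix)
	}
	return strings.TrimPrefix(correlationID, "upstream:")
}

func main() {
	fmt.Println(upstreamKey("upstream:service:db"))               // "db"
	fmt.Println(upstreamKey("upstream:prepared_query:geo-cache")) // "prepared_query:geo-cache"
}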


@ -165,7 +165,7 @@ func TestConfigSnapshot(t testing.T) *ConfigSnapshot {
Roots: roots,
Leaf: leaf,
UpstreamEndpoints: map[string]structs.CheckServiceNodes{
"service:db": TestUpstreamNodes(t),
"db": TestUpstreamNodes(t),
},
}
}


@ -168,8 +168,11 @@ func (u *Upstream) Identifier() string {
name += "?dc=" + u.Datacenter
}
typ := u.DestinationType
if typ == "" {
typ = UpstreamDestTypeService
// Service is the default type so never prefix it. This is more readable, and
// long term it's the only type that matters, so we can drop the prefix and
// have nicer naming in metrics etc.
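// For example a service upstream named "db" is now identified as just "db"
// (or "db?dc=dc2" with an explicit datacenter), while a prepared query
// upstream keeps its prefix, e.g. "prepared_query:geo-cache".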
if typ == "" || typ == UpstreamDestTypeService {
return name
}
return typ + ":" + name
}


@ -4,13 +4,13 @@ import (
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoyauth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
envoycluster "github.com/envoyproxy/go-control-plane/envoy/api/v2/cluster"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoyendpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
@ -21,7 +21,7 @@ import (
// clustersFromSnapshot returns the xDS API representation of the "clusters"
// (upstreams) in the snapshot.
func clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
func (s *Server) clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
if cfgSnap == nil {
return nil, errors.New("nil config given")
}
@ -29,13 +29,13 @@ func clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]pro
clusters := make([]proto.Message, len(cfgSnap.Proxy.Upstreams)+1)
var err error
clusters[0], err = makeAppCluster(cfgSnap)
clusters[0], err = s.makeAppCluster(cfgSnap)
if err != nil {
return nil, err
}
for idx, upstream := range cfgSnap.Proxy.Upstreams {
clusters[idx+1], err = makeUpstreamCluster(upstream, cfgSnap)
clusters[idx+1], err = s.makeUpstreamCluster(upstream, cfgSnap)
if err != nil {
return nil, err
}
@ -44,18 +44,20 @@ func clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]pro
return clusters, nil
}
func makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
func (s *Server) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
var c *envoy.Cluster
var err error
cfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s.Logger.Printf("[WARN] envoy: failed to parse Connect.Proxy.Config: %s", err)
}
// If we have overridden local cluster config try to parse it into an Envoy cluster
if clusterJSONRaw, ok := cfgSnap.Proxy.Config["envoy_local_cluster_json"]; ok {
if clusterJSON, ok := clusterJSONRaw.(string); ok {
c, err = makeClusterFromUserConfig(clusterJSON)
if err != nil {
return c, err
}
}
if cfg.LocalClusterJSON != "" {
return makeClusterFromUserConfig(cfg.LocalClusterJSON)
}
if c == nil {
@ -65,72 +67,52 @@ func makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
}
c = &envoy.Cluster{
Name: LocalAppClusterName,
ConnectTimeout: 5 * time.Second,
ConnectTimeout: time.Duration(cfg.LocalConnectTimeoutMs) * time.Millisecond,
Type: envoy.Cluster_STATIC,
// API v2 docs say hosts is deprecated and should use LoadAssignment as
// below.. but it doesn't work for tcp_proxy target for some reason.
Hosts: []*envoycore.Address{makeAddressPtr(addr, cfgSnap.Proxy.LocalServicePort)},
// LoadAssignment: &envoy.ClusterLoadAssignment{
// ClusterName: LocalAppClusterName,
// Endpoints: []endpoint.LocalityLbEndpoints{
// {
// LbEndpoints: []endpoint.LbEndpoint{
// makeEndpoint(LocalAppClusterName,
// addr,
// cfgSnap.Proxy.LocalServicePort),
// },
// },
// },
// },
LoadAssignment: &envoy.ClusterLoadAssignment{
ClusterName: LocalAppClusterName,
Endpoints: []envoyendpoint.LocalityLbEndpoints{
{
LbEndpoints: []envoyendpoint.LbEndpoint{
makeEndpoint(LocalAppClusterName,
addr,
cfgSnap.Proxy.LocalServicePort),
},
},
},
},
}
if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
c.Http2ProtocolOptions = &envoycore.Http2ProtocolOptions{}
}
}
return c, err
}
func parseTimeMillis(ms interface{}) (time.Duration, error) {
switch v := ms.(type) {
case string:
ms, err := strconv.Atoi(v)
if err != nil {
return 0, err
}
return time.Duration(ms) * time.Millisecond, nil
case float64: // This is what parsing from JSON results in
return time.Duration(v) * time.Millisecond, nil
// Not sure if this can ever really happen but just in case it does in
// some test code...
case int:
return time.Duration(v) * time.Millisecond, nil
}
return 0, errors.New("invalid type for millisecond duration")
}
func makeUpstreamCluster(upstream structs.Upstream, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
func (s *Server) makeUpstreamCluster(upstream structs.Upstream, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
var c *envoy.Cluster
var err error
// If we have overridden cluster config attempt to parse it into an Envoy cluster
if clusterJSONRaw, ok := upstream.Config["envoy_cluster_json"]; ok {
if clusterJSON, ok := clusterJSONRaw.(string); ok {
c, err = makeClusterFromUserConfig(clusterJSON)
if err != nil {
return c, err
}
cfg, err := ParseUpstreamConfig(upstream.Config)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s.Logger.Printf("[WARN] envoy: failed to parse Upstream[%s].Config: %s",
upstream.Identifier(), err)
}
if cfg.ClusterJSON != "" {
c, err = makeClusterFromUserConfig(cfg.ClusterJSON)
if err != nil {
return c, err
}
// In the happy path don't return yet as we need to inject TLS config still.
}
if c == nil {
conTimeout := 5 * time.Second
if toRaw, ok := upstream.Config["connect_timeout_ms"]; ok {
if ms, err := parseTimeMillis(toRaw); err == nil {
conTimeout = ms
}
}
c = &envoy.Cluster{
Name: upstream.Identifier(),
ConnectTimeout: conTimeout,
ConnectTimeout: time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond,
Type: envoy.Cluster_EDS,
EdsClusterConfig: &envoy.Cluster_EdsClusterConfig{
EdsConfig: &envoycore.ConfigSource{
@ -142,6 +124,9 @@ func makeUpstreamCluster(upstream structs.Upstream, cfgSnap *proxycfg.ConfigSnap
// Having an empty config enables outlier detection with default config.
OutlierDetection: &envoycluster.OutlierDetection{},
}
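// gRPC is carried over HTTP/2, so both protocol values need Envoy's HTTP/2
// options enabled on the upstream cluster.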
if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
c.Http2ProtocolOptions = &envoycore.Http2ProtocolOptions{}
}
}
// Enable TLS upstream with the configured client certificate.
@ -188,8 +173,7 @@ func makeClusterFromUserConfig(configJSON string) (*envoy.Cluster, error) {
// And then unmarshal the listener again...
err = proto.Unmarshal(any.Value, &c)
if err != nil {
panic(err)
//return nil, err
return nil, err
}
return &c, err
}


@ -1,76 +1,292 @@
package xds
import (
"bytes"
"log"
"os"
"path"
"testing"
"time"
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoyauth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
"github.com/envoyproxy/go-control-plane/envoy/api/v2/cluster"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
"github.com/stretchr/testify/require"
"text/template"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/require"
)
func Test_makeUpstreamCluster(t *testing.T) {
func TestClustersFromSnapshot(t *testing.T) {
tests := []struct {
name string
snap proxycfg.ConfigSnapshot
upstream structs.Upstream
want *envoy.Cluster
name string
// Setup is called before the test starts. It is passed the snapshot from
// TestConfigSnapshot and is allowed to modify it in any way to set up the
// test input.
setup func(snap *proxycfg.ConfigSnapshot)
overrideGoldenName string
}{
{
name: "timeout override",
snap: proxycfg.ConfigSnapshot{},
upstream: structs.TestUpstreams(t)[0],
want: &envoy.Cluster{
Name: "service:db",
Type: envoy.Cluster_EDS,
EdsClusterConfig: &envoy.Cluster_EdsClusterConfig{
EdsConfig: &envoycore.ConfigSource{
ConfigSourceSpecifier: &envoycore.ConfigSource_Ads{
Ads: &envoycore.AggregatedConfigSource{},
},
},
},
ConnectTimeout: 1 * time.Second, // TestUpstreams overrides to 1000ms
OutlierDetection: &cluster.OutlierDetection{},
TlsContext: &envoyauth.UpstreamTlsContext{
CommonTlsContext: makeCommonTLSContext(&proxycfg.ConfigSnapshot{}),
},
name: "defaults",
setup: nil, // Default snapshot
},
{
name: "custom-local-app",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["envoy_local_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
IncludeType: false,
})
},
},
{
name: "timeout default",
snap: proxycfg.ConfigSnapshot{},
upstream: structs.TestUpstreams(t)[1],
want: &envoy.Cluster{
Name: "prepared_query:geo-cache",
Type: envoy.Cluster_EDS,
EdsClusterConfig: &envoy.Cluster_EdsClusterConfig{
EdsConfig: &envoycore.ConfigSource{
ConfigSourceSpecifier: &envoycore.ConfigSource_Ads{
Ads: &envoycore.AggregatedConfigSource{},
},
},
},
ConnectTimeout: 5 * time.Second, // Default
OutlierDetection: &cluster.OutlierDetection{},
TlsContext: &envoyauth.UpstreamTlsContext{
CommonTlsContext: makeCommonTLSContext(&proxycfg.ConfigSnapshot{}),
},
name: "custom-local-app-typed",
overrideGoldenName: "custom-local-app",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["envoy_local_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
IncludeType: true,
})
},
},
{
name: "custom-upstream",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: false,
})
},
},
{
name: "custom-upstream-typed",
overrideGoldenName: "custom-upstream",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: true,
})
},
},
{
name: "custom-upstream-ignores-tls",
overrideGoldenName: "custom-upstream", // should be the same
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: true,
// Attempt to override the TLS context should be ignored
TLSContext: `{"commonTlsContext": {}}`,
})
},
},
{
name: "custom-timeouts",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["local_connect_timeout_ms"] = 1234
snap.Proxy.Upstreams[0].Config["connect_timeout_ms"] = 2345
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require := require.New(t)
got, err := makeUpstreamCluster(tt.upstream, &tt.snap)
// Sanity check default with no overrides first
snap := proxycfg.TestConfigSnapshot(t)
// We need to replace the TLS certs with deterministic ones to make golden
// files workable. Note we don't update these, otherwise they'd change the
// golden files for every test case and so not be any use!
snap.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
snap.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
snap.Roots.Roots[0].RootCert = golden(t, "test-root-cert", "")
if tt.setup != nil {
tt.setup(snap)
}
// Need server just for logger dependency
s := Server{Logger: log.New(os.Stderr, "", log.LstdFlags)}
clusters, err := s.clustersFromSnapshot(snap, "my-token")
require.NoError(err)
r, err := createResponse(ClusterType, "00000001", "00000001", clusters)
require.NoError(err)
require.Equal(tt.want, got)
gotJSON := responseToJSON(t, r)
gName := tt.name
if tt.overrideGoldenName != "" {
gName = tt.overrideGoldenName
}
require.JSONEq(golden(t, path.Join("clusters", gName), gotJSON), gotJSON)
})
}
}
func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) map[string]string {
return map[string]string{
"local_app": `
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "local_app",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
}`,
"db": `
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "db",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"outlierDetection": {
},
"connectTimeout": "1s",
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap) + `
}`,
"prepared_query:geo-cache": `
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "prepared_query:geo-cache",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"outlierDetection": {
},
"connectTimeout": "5s",
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap) + `
}`,
}
}
func expectClustersJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64, resourcesJSON map[string]string) string {
resJSON := ""
// Sort resources into specific order because that matters in JSONEq
// comparison later.
keyOrder := []string{"local_app"}
for _, u := range snap.Proxy.Upstreams {
keyOrder = append(keyOrder, u.Identifier())
}
for _, k := range keyOrder {
j, ok := resourcesJSON[k]
if !ok {
continue
}
if resJSON != "" {
resJSON += ",\n"
}
resJSON += j
}
return `{
"versionInfo": "` + hexString(v) + `",
"resources": [` + resJSON + `],
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
"nonce": "` + hexString(n) + `"
}`
}
func expectClustersJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
return expectClustersJSONFromResources(t, snap, token, v, n,
expectClustersJSONResources(t, snap, token, v, n))
}
type customClusterJSONOptions struct {
Name string
IncludeType bool
TLSContext string
}
var customEDSClusterJSONTpl = `{
{{ if .IncludeType -}}
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
{{- end }}
{{ if .TLSContext -}}
"tlsContext": {{ .TLSContext }},
{{- end }}
"name": "{{ .Name }}",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "5s"
}`
var customEDSClusterJSONTemplate = template.Must(template.New("").Parse(customEDSClusterJSONTpl))
func customEDSClusterJSON(t *testing.T, opts customClusterJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customEDSClusterJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}
var customAppClusterJSONTpl = `{
{{ if .IncludeType -}}
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
{{- end }}
{{ if .TLSContext -}}
"tlsContext": {{ .TLSContext }},
{{- end }}
"name": "{{ .Name }}",
"connectTimeout": "5s",
"hosts": [
{
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
]
}`
var customAppClusterJSONTemplate = template.Must(template.New("").Parse(customAppClusterJSONTpl))
func customAppClusterJSON(t *testing.T, opts customClusterJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customAppClusterJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}

agent/xds/config.go (new file, 94 lines)

@ -0,0 +1,94 @@
package xds
import (
"strings"
"github.com/mitchellh/mapstructure"
)
// ProxyConfig describes the keys we understand from Connect.Proxy.Config. Note
// that this only includes config keys that affect runtime config delivered by
// xDS. For Envoy config keys that affect bootstrap generation see
// command/connect/envoy/bootstrap_config.go.
type ProxyConfig struct {
// PublicListenerJSON is a complete override ("escape hatch") for the
// proxy's public listener. The Connect server TLS certificate and
// validation context will be injected, overriding any TLS settings present. An
// AuthZ filter will also be prepended to each filterChain provided to enforce
// Connect's access control.
PublicListenerJSON string `mapstructure:"envoy_public_listener_json"`
// LocalClusterJSON is a complete override ("escape hatch") for the
// local application cluster.
LocalClusterJSON string `mapstructure:"envoy_local_cluster_json"`
// LocalConnectTimeoutMs is the number of milliseconds to timeout making a new
// connection to the local app instance. Defaults to 5000 (5 seconds) if not
// set.
LocalConnectTimeoutMs int `mapstructure:"local_connect_timeout_ms"`
// Protocol describes the service's protocol. Valid values are "tcp",
// "http" and "grpc". Anything else is treated as tcp. The enables protocol
// aware features like per-request metrics and connection pooling, tracing,
// routing etc.
Protocol string `mapstructure:"protocol"`
}
// ParseProxyConfig returns the ProxyConfig parsed from an opaque map. If an
// error occurs during parsing, it is returned along with the default config so
// the caller can choose whether and how to report the error.
func ParseProxyConfig(m map[string]interface{}) (ProxyConfig, error) {
var cfg ProxyConfig
err := mapstructure.WeakDecode(m, &cfg)
// Set defaults (even if error is returned)
if cfg.Protocol == "" {
cfg.Protocol = "tcp"
} else {
cfg.Protocol = strings.ToLower(cfg.Protocol)
}
if cfg.LocalConnectTimeoutMs < 1 {
cfg.LocalConnectTimeoutMs = 5000
}
return cfg, err
}
// UpstreamConfig describes the keys we understand from
// Connect.Proxy.Upstream[*].Config.
type UpstreamConfig struct {
// ListenerJSON is a complete override ("escape hatch") for the upstream's
// listener.
ListenerJSON string `mapstructure:"envoy_listener_json"`
// ClusterJSON is a complete override ("escape hatch") for the upstream's
// cluster. The Connect client TLS certificate and context will be injected
// overriding any TLS settings present.
ClusterJSON string `mapstructure:"envoy_cluster_json"`
// Protocol describes the upstream's service protocol. Valid values are "tcp",
// "http" and "grpc". Anything else is treated as tcp. The enables protocol
// aware features like per-request metrics and connection pooling, tracing,
// routing etc.
Protocol string `mapstructure:"protocol"`
// ConnectTimeoutMs is the number of milliseconds to timeout making a new
// connection to this upstream. Defaults to 5000 (5 seconds) if not set.
ConnectTimeoutMs int `mapstructure:"connect_timeout_ms"`
}
// ParseUpstreamConfig returns the UpstreamConfig parsed from an opaque map.
// If an error occurs during parsing, it is returned along with the default
// config so the caller can choose whether and how to report the error.
func ParseUpstreamConfig(m map[string]interface{}) (UpstreamConfig, error) {
var cfg UpstreamConfig
err := mapstructure.WeakDecode(m, &cfg)
// Set defaults (even if error is returned)
if cfg.Protocol == "" {
cfg.Protocol = "tcp"
} else {
cfg.Protocol = strings.ToLower(cfg.Protocol)
}
if cfg.ConnectTimeoutMs < 1 {
cfg.ConnectTimeoutMs = 5000
}
return cfg, err
}
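
A usage sketch (illustrative, not part of this file): both parsers tolerate loosely typed values thanks to mapstructure's WeakDecode and still return usable defaults alongside any error:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/xds"
)

func main() {
	// Values come from the opaque Connect proxy/upstream Config maps, so they
	// may arrive as strings or floats depending on how they were supplied.
	cfg, err := xds.ParseUpstreamConfig(map[string]interface{}{
		"protocol":           "gRPC", // normalised to lower case
		"connect_timeout_ms": "2345", // WeakDecode coerces the string to an int
	})
	fmt.Println(cfg.Protocol, cfg.ConnectTimeoutMs, err) // grpc 2345 <nil>

	// Unknown keys are ignored and defaults apply when nothing is set.
	pcfg, _ := xds.ParseProxyConfig(nil)
	fmt.Println(pcfg.Protocol, pcfg.LocalConnectTimeoutMs) // tcp 5000
}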

agent/xds/config_test.go (new file, 183 lines)

@ -0,0 +1,183 @@
package xds
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestParseProxyConfig(t *testing.T) {
tests := []struct {
name string
input map[string]interface{}
want ProxyConfig
}{
{
name: "defaults - nil",
input: nil,
want: ProxyConfig{
Protocol: "tcp",
LocalConnectTimeoutMs: 5000,
},
},
{
name: "defaults - empty",
input: map[string]interface{}{},
want: ProxyConfig{
Protocol: "tcp",
LocalConnectTimeoutMs: 5000,
},
},
{
name: "defaults - other stuff",
input: map[string]interface{}{
"foo": "bar",
"envoy_foo": "envoy_bar",
},
want: ProxyConfig{
Protocol: "tcp",
LocalConnectTimeoutMs: 5000,
},
},
{
name: "protocol override",
input: map[string]interface{}{
"protocol": "http",
},
want: ProxyConfig{
Protocol: "http",
LocalConnectTimeoutMs: 5000,
},
},
{
name: "protocol uppercase override",
input: map[string]interface{}{
"protocol": "HTTP",
},
want: ProxyConfig{
Protocol: "http",
LocalConnectTimeoutMs: 5000,
},
},
{
name: "local connect timeout override, string",
input: map[string]interface{}{
"local_connect_timeout_ms": "1000",
},
want: ProxyConfig{
LocalConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
name: "local connect timeout override, float ",
input: map[string]interface{}{
"local_connect_timeout_ms": float64(1000.0),
},
want: ProxyConfig{
LocalConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
name: "local connect timeout override, int ",
input: map[string]interface{}{
"local_connect_timeout_ms": 1000,
},
want: ProxyConfig{
LocalConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseProxyConfig(tt.input)
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}
func TestParseUpstreamConfig(t *testing.T) {
tests := []struct {
name string
input map[string]interface{}
want UpstreamConfig
}{
{
name: "defaults - nil",
input: nil,
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
},
},
{
name: "defaults - empty",
input: map[string]interface{}{},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
},
},
{
name: "defaults - other stuff",
input: map[string]interface{}{
"foo": "bar",
"envoy_foo": "envoy_bar",
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
},
},
{
name: "protocol override",
input: map[string]interface{}{
"protocol": "http",
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "http",
},
},
{
name: "connect timeout override, string",
input: map[string]interface{}{
"connect_timeout_ms": "1000",
},
want: UpstreamConfig{
ConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
name: "connect timeout override, float ",
input: map[string]interface{}{
"connect_timeout_ms": float64(1000.0),
},
want: UpstreamConfig{
ConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
name: "connect timeout override, int ",
input: map[string]interface{}{
"connect_timeout_ms": 1000,
},
want: UpstreamConfig{
ConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseUpstreamConfig(tt.input)
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}

agent/xds/golden_test.go (new file, 43 lines)

@ -0,0 +1,43 @@
package xds
import (
"flag"
"io/ioutil"
"path/filepath"
"testing"
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
"github.com/gogo/protobuf/jsonpb"
"github.com/stretchr/testify/require"
)
// update allows golden files to be updated based on the current output.
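// Run the tests with -update (e.g. `go test ./agent/xds -update`) to
// regenerate the golden files under testdata from the current output.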
var update = flag.Bool("update", false, "update golden files")
// golden reads and optionally writes the expected data to the golden file,
// returning the contents as a string.
func golden(t *testing.T, name, got string) string {
t.Helper()
golden := filepath.Join("testdata", name+".golden")
if *update && got != "" {
err := ioutil.WriteFile(golden, []byte(got), 0644)
require.NoError(t, err)
}
expected, err := ioutil.ReadFile(golden)
require.NoError(t, err)
return string(expected)
}
func responseToJSON(t *testing.T, r *envoy.DiscoveryResponse) string {
t.Helper()
m := jsonpb.Marshaler{
Indent: " ",
}
gotJSON, err := m.MarshalToString(r)
require.NoError(t, err)
return gotJSON
}


@ -4,13 +4,17 @@ import (
"encoding/json"
"errors"
"fmt"
"strings"
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoyauth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoylistener "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener"
envoyroute "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
extauthz "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/ext_authz/v2"
envoyhttp "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
envoytcp "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/tcp_proxy/v2"
envoytype "github.com/envoyproxy/go-control-plane/envoy/type"
"github.com/envoyproxy/go-control-plane/pkg/util"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
@ -22,7 +26,7 @@ import (
// listenersFromSnapshot returns the xDS API representation of the "listeners"
// in the snapshot.
func listenersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
func (s *Server) listenersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
if cfgSnap == nil {
return nil, errors.New("nil config given")
}
@ -32,12 +36,12 @@ func listenersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]pr
// Configure public listener
var err error
resources[0], err = makePublicListener(cfgSnap, token)
resources[0], err = s.makePublicListener(cfgSnap, token)
if err != nil {
return nil, err
}
for i, u := range cfgSnap.Proxy.Upstreams {
resources[i+1], err = makeUpstreamListener(&u)
resources[i+1], err = s.makeUpstreamListener(&u)
if err != nil {
return nil, err
}
@ -137,17 +141,23 @@ func injectConnectFilters(cfgSnap *proxycfg.ConfigSnapshot, token string, listen
return nil
}
func makePublicListener(cfgSnap *proxycfg.ConfigSnapshot, token string) (proto.Message, error) {
func (s *Server) makePublicListener(cfgSnap *proxycfg.ConfigSnapshot, token string) (proto.Message, error) {
var l *envoy.Listener
var err error
if listenerJSONRaw, ok := cfgSnap.Proxy.Config["envoy_public_listener_json"]; ok {
if listenerJSON, ok := listenerJSONRaw.(string); ok {
l, err = makeListenerFromUserConfig(listenerJSON)
if err != nil {
return l, err
}
cfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s.Logger.Printf("[WARN] envoy: failed to parse Connect.Proxy.Config: %s", err)
}
if cfg.PublicListenerJSON != "" {
l, err = makeListenerFromUserConfig(cfg.PublicListenerJSON)
if err != nil {
return l, err
}
// In the happy path don't return yet as we need to inject TLS config still.
}
if l == nil {
@ -157,16 +167,15 @@ func makePublicListener(cfgSnap *proxycfg.ConfigSnapshot, token string) (proto.M
addr = "0.0.0.0"
}
l = makeListener(PublicListenerName, addr, cfgSnap.Port)
tcpProxy, err := makeTCPProxyFilter("public_listener", LocalAppClusterName)
filter, err := makeListenerFilter(cfg.Protocol, "public_listener", LocalAppClusterName, "", true)
if err != nil {
return l, err
return nil, err
}
// Setup TCP proxy for now. We inject TLS and authz below for both default
// and custom config cases.
l.FilterChains = []envoylistener.FilterChain{
{
Filters: []envoylistener.Filter{
tcpProxy,
filter,
},
},
}
@ -176,39 +185,141 @@ func makePublicListener(cfgSnap *proxycfg.ConfigSnapshot, token string) (proto.M
return l, err
}
func makeUpstreamListener(u *structs.Upstream) (proto.Message, error) {
if listenerJSONRaw, ok := u.Config["envoy_listener_json"]; ok {
if listenerJSON, ok := listenerJSONRaw.(string); ok {
return makeListenerFromUserConfig(listenerJSON)
}
func (s *Server) makeUpstreamListener(u *structs.Upstream) (proto.Message, error) {
cfg, err := ParseUpstreamConfig(u.Config)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s.Logger.Printf("[WARN] envoy: failed to parse Upstream[%s].Config: %s",
u.Identifier(), err)
}
if cfg.ListenerJSON != "" {
return makeListenerFromUserConfig(cfg.ListenerJSON)
}
addr := u.LocalBindAddress
if addr == "" {
addr = "127.0.0.1"
}
l := makeListener(u.Identifier(), addr, u.LocalBindPort)
tcpProxy, err := makeTCPProxyFilter(u.Identifier(), u.Identifier())
filter, err := makeListenerFilter(cfg.Protocol, u.Identifier(), u.Identifier(), "upstream_", false)
if err != nil {
return l, err
return nil, err
}
l.FilterChains = []envoylistener.FilterChain{
{
Filters: []envoylistener.Filter{
tcpProxy,
filter,
},
},
}
return l, nil
}
func makeTCPProxyFilter(name, cluster string) (envoylistener.Filter, error) {
func makeListenerFilter(protocol, filterName, cluster, statPrefix string, ingress bool) (envoylistener.Filter, error) {
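// protocol comes from ParseProxyConfig/ParseUpstreamConfig; ingress is true
// for the public listener and false for upstream listeners, which also get an
// "upstream_" stat prefix so their metrics stay distinct.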
switch protocol {
case "grpc":
return makeHTTPFilter(filterName, cluster, statPrefix, ingress, true, true)
case "http2":
return makeHTTPFilter(filterName, cluster, statPrefix, ingress, false, true)
case "http":
return makeHTTPFilter(filterName, cluster, statPrefix, ingress, false, false)
case "tcp":
fallthrough
default:
return makeTCPProxyFilter(filterName, cluster, statPrefix)
}
}
func makeTCPProxyFilter(filterName, cluster, statPrefix string) (envoylistener.Filter, error) {
cfg := &envoytcp.TcpProxy{
StatPrefix: name,
StatPrefix: makeStatPrefix("tcp", statPrefix, filterName),
Cluster: cluster,
}
return makeFilter("envoy.tcp_proxy", cfg)
}
func makeStatPrefix(protocol, prefix, filterName string) string {
// Replace colons here because Envoy does that in the metrics for the actual
// clusters but not in the stat prefix here, while dashboards assume they
// will match.
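// For example makeStatPrefix("tcp", "upstream_", "prepared_query:geo-cache")
// returns "upstream_prepared_query_geo-cache_tcp".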
return fmt.Sprintf("%s%s_%s", prefix, strings.Replace(filterName, ":", "_", -1), protocol)
}
func makeHTTPFilter(filterName, cluster, statPrefix string, ingress, grpc, http2 bool) (envoylistener.Filter, error) {
op := envoyhttp.INGRESS
if !ingress {
op = envoyhttp.EGRESS
}
proto := "http"
if grpc {
proto = "grpc"
}
cfg := &envoyhttp.HttpConnectionManager{
StatPrefix: makeStatPrefix(proto, statPrefix, filterName),
CodecType: envoyhttp.AUTO,
RouteSpecifier: &envoyhttp.HttpConnectionManager_RouteConfig{
RouteConfig: &envoy.RouteConfiguration{
Name: filterName,
VirtualHosts: []envoyroute.VirtualHost{
envoyroute.VirtualHost{
Name: filterName,
Domains: []string{"*"},
Routes: []envoyroute.Route{
envoyroute.Route{
Match: envoyroute.RouteMatch{
PathSpecifier: &envoyroute.RouteMatch_Prefix{
Prefix: "/",
},
// TODO(banks) Envoy supports matching only valid GRPC
// requests which might be nice to add here for gRPC services
// but it's not supported in our current envoy SDK version
// although docs say it was supported by 1.8.0. Going to defer
// that until we've updated the deps.
},
Action: &envoyroute.Route_Route{
Route: &envoyroute.RouteAction{
ClusterSpecifier: &envoyroute.RouteAction_Cluster{
Cluster: cluster,
},
},
},
},
},
},
},
},
},
HttpFilters: []*envoyhttp.HttpFilter{
&envoyhttp.HttpFilter{
Name: "envoy.router",
},
},
Tracing: &envoyhttp.HttpConnectionManager_Tracing{
OperationName: op,
// Don't trace any requests by default unless the client application
// explicitly propagates trace headers that indicate this should be
// sampled.
RandomSampling: &envoytype.Percent{Value: 0.0},
},
}
if http2 {
cfg.Http2ProtocolOptions = &envoycore.Http2ProtocolOptions{}
}
if grpc {
// Add grpc bridge before router
cfg.HttpFilters = append([]*envoyhttp.HttpFilter{&envoyhttp.HttpFilter{
Name: "envoy.grpc_http1_bridge",
Config: &types.Struct{},
}}, cfg.HttpFilters...)
}
return makeFilter("envoy.http_connection_manager", cfg)
}
func makeExtAuthFilter(token string) (envoylistener.Filter, error) {
cfg := &extauthz.ExtAuthz{
StatPrefix: "connect_authz",

agent/xds/listeners_test.go (new file, 327 lines)

@ -0,0 +1,327 @@
package xds
import (
"bytes"
"fmt"
"log"
"os"
"path"
"testing"
"text/template"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/stretchr/testify/require"
)
func TestListenersFromSnapshot(t *testing.T) {
tests := []struct {
name string
// Setup is called before the test starts. It is passed the snapshot from
// TestConfigSnapshot and is allowed to modify it in any way to set up the
// test input.
setup func(snap *proxycfg.ConfigSnapshot)
overrideGoldenName string
}{
{
name: "defaults",
setup: nil, // Default snapshot
},
{
name: "http-public-listener",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["protocol"] = "http"
},
},
{
name: "http-upstream",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Upstreams[0].Config["protocol"] = "http"
},
},
{
name: "custom-public-listener",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["envoy_public_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
IncludeType: false,
})
},
},
{
name: "custom-public-listener-typed",
overrideGoldenName: "custom-public-listener", // should be the same
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["envoy_public_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
IncludeType: true,
})
},
},
{
name: "custom-public-listener-ignores-tls",
overrideGoldenName: "custom-public-listener", // should be the same
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Config["envoy_public_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
IncludeType: true,
// Attempt to override the TLS context should be ignored
TLSContext: `{"requireClientCertificate": false}`,
})
},
},
{
name: "custom-upstream",
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Upstreams[0].Config["envoy_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-upstream",
IncludeType: false,
})
},
},
{
name: "custom-upstream-typed",
overrideGoldenName: "custom-upstream", // should be the same
setup: func(snap *proxycfg.ConfigSnapshot) {
snap.Proxy.Upstreams[0].Config["envoy_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-upstream",
IncludeType: true,
})
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require := require.New(t)
// Sanity check default with no overrides first
snap := proxycfg.TestConfigSnapshot(t)
// We need to replace the TLS certs with deterministic ones to make golden
// files workable. Note we don't update these, otherwise they'd change the
// golden files for every test case and so not be any use!
snap.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
snap.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
snap.Roots.Roots[0].RootCert = golden(t, "test-root-cert", "")
if tt.setup != nil {
tt.setup(snap)
}
// Need server just for logger dependency
s := Server{Logger: log.New(os.Stderr, "", log.LstdFlags)}
listeners, err := s.listenersFromSnapshot(snap, "my-token")
require.NoError(err)
r, err := createResponse(ListenerType, "00000001", "00000001", listeners)
require.NoError(err)
gotJSON := responseToJSON(t, r)
gName := tt.name
if tt.overrideGoldenName != "" {
gName = tt.overrideGoldenName
}
require.JSONEq(golden(t, path.Join("listeners", gName), gotJSON), gotJSON)
})
}
}
func expectListenerJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) map[string]string {
tokenVal := ""
if token != "" {
tokenVal = fmt.Sprintf(",\n"+`"value": "%s"`, token)
}
return map[string]string{
"public_listener": `{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"tlsContext": ` + expectedPublicTLSContextJSON(t, snap) + `,
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token"
` + tokenVal + `
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "local_app",
"stat_prefix": "public_listener_tcp"
}
}
]
}
]
}`,
"db": `{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "db",
"stat_prefix": "upstream_db_tcp"
}
}
]
}
]
}`,
"prepared_query:geo-cache": `{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
}
}
]
}
]
}`,
}
}
func expectListenerJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64, resourcesJSON map[string]string) string {
resJSON := ""
// Sort resources into specific order because that matters in JSONEq
// comparison later.
keyOrder := []string{"public_listener"}
for _, u := range snap.Proxy.Upstreams {
keyOrder = append(keyOrder, u.Identifier())
}
for _, k := range keyOrder {
j, ok := resourcesJSON[k]
if !ok {
continue
}
if resJSON != "" {
resJSON += ",\n"
}
resJSON += j
}
return `{
"versionInfo": "` + hexString(v) + `",
"resources": [` + resJSON + `],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "` + hexString(n) + `"
}`
}
func expectListenerJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
return expectListenerJSONFromResources(t, snap, token, v, n,
expectListenerJSONResources(t, snap, token, v, n))
}
type customListenerJSONOptions struct {
Name string
IncludeType bool
OverrideAuthz bool
TLSContext string
}
const customListenerJSONTpl = `{
{{ if .IncludeType -}}
"@type": "type.googleapis.com/envoy.api.v2.Listener",
{{- end }}
"name": "{{ .Name }}",
"address": {
"socketAddress": {
"address": "11.11.11.11",
"portValue": 11111
}
},
"filterChains": [
{
{{ if .TLSContext -}}
"tlsContext": {{ .TLSContext }},
{{- end }}
"filters": [
{{ if .OverrideAuthz -}}
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{{- end }}
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "random-cluster",
"stat_prefix": "foo-stats"
}
}
]
}
]
}`
var customListenerJSONTemplate = template.Must(template.New("").Parse(customListenerJSONTpl))
func customListenerJSON(t *testing.T, opts customListenerJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customListenerJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}


@ -51,11 +51,31 @@ const (
PublicListenerName = "public_listener"
// LocalAppClusterName is the name we give the local application "cluster" in
// Envoy config.
// Envoy config. Note that all cluster names may collide with service names
// since we want cluster names and service names to match to enable nice
// metrics correlation without massaging prefixes on cluster names.
//
// We should probably make this less likely to collide; however, changing it
// potentially breaks upgrade compatibility without restarting all Envoys, as
// it will no longer match their existing cluster name. Changing this will
// affect metrics output so could break dashboards (for local app traffic).
//
// We should probably just make it configurable if anyone actually has
// services named "local_app" in the future.
LocalAppClusterName = "local_app"
// LocalAgentClusterName is the name we give the local agent "cluster" in
// Envoy config.
// Envoy config. Note that all cluster names may collide with service names
// since we want cluster names and service names to match to enable nice
// metrics correlation without massaging prefixes on cluster names.
//
// We should probably make this less likely to collide; however, changing it
// potentially breaks upgrade compatibility without restarting all Envoys, as
// it will no longer match their existing cluster name. Changing this will
// affect metrics output so could break dashboards (for local agent traffic).
//
// We should probably just make it configurable if anyone actually has
// services named "local_agent" in the future.
LocalAgentClusterName = "local_agent"
// DefaultAuthCheckFrequency is the default value for
@ -178,7 +198,7 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
},
ClusterType: &xDSType{
typeURL: ClusterType,
resources: clustersFromSnapshot,
resources: s.clustersFromSnapshot,
stream: stream,
},
RouteType: &xDSType{
@ -188,7 +208,7 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
},
ListenerType: &xDSType{
typeURL: ListenerType,
resources: listenersFromSnapshot,
resources: s.listenersFromSnapshot,
stream: stream,
},
}
@ -421,6 +441,8 @@ func (s *Server) Check(ctx context.Context, r *envoyauthz.CheckRequest) (*envoya
// Parse destination to know the target service
dest, err := connect.ParseCertURIFromString(r.Attributes.Destination.Principal)
if err != nil {
s.Logger.Printf("[DEBUG] grpc: Connect AuthZ DENIED: bad destination URI: src=%s dest=%s",
r.Attributes.Source.Principal, r.Attributes.Destination.Principal)
// Treat this as an auth error since Envoy has sent something it considers
// valid, it's just not an identity we trust.
return deniedResponse("Destination Principal is not a valid Connect identity")
@ -428,6 +450,8 @@ func (s *Server) Check(ctx context.Context, r *envoyauthz.CheckRequest) (*envoya
destID, ok := dest.(*connect.SpiffeIDService)
if !ok {
s.Logger.Printf("[DEBUG] grpc: Connect AuthZ DENIED: bad destination service ID: src=%s dest=%s",
r.Attributes.Source.Principal, r.Attributes.Destination.Principal)
return deniedResponse("Destination Principal is not a valid Service identity")
}
@ -452,14 +476,22 @@ func (s *Server) Check(ctx context.Context, r *envoyauthz.CheckRequest) (*envoya
authed, reason, _, err := s.Authz.ConnectAuthorize(token, req)
if err != nil {
if err == acl.ErrPermissionDenied {
s.Logger.Printf("[DEBUG] grpc: Connect AuthZ failed ACL check: %s: src=%s dest=%s",
err, r.Attributes.Source.Principal, r.Attributes.Destination.Principal)
return nil, status.Error(codes.PermissionDenied, err.Error())
}
s.Logger.Printf("[DEBUG] grpc: Connect AuthZ failed: %s: src=%s dest=%s",
err, r.Attributes.Source.Principal, r.Attributes.Destination.Principal)
return nil, status.Error(codes.Internal, err.Error())
}
if !authed {
s.Logger.Printf("[DEBUG] grpc: Connect AuthZ DENIED: src=%s dest=%s reason=%s",
r.Attributes.Source.Principal, r.Attributes.Destination.Principal, reason)
return deniedResponse(reason)
}
s.Logger.Printf("[DEBUG] grpc: Connect AuthZ ALLOWED: src=%s dest=%s reason=%s",
r.Attributes.Source.Principal, r.Attributes.Destination.Principal, reason)
return &envoyauthz.CheckResponse{
Status: &rpc.Status{
Code: int32(rpc.OK),


@ -1,21 +1,17 @@
package xds
import (
"bytes"
"context"
"errors"
"fmt"
"log"
"os"
"strings"
"sync"
"sync/atomic"
"testing"
"text/template"
"time"
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
"github.com/gogo/protobuf/jsonpb"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
@ -234,228 +230,13 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
assertResponseSent(t, envoy.stream.sendCh, expectListenerJSON(t, snap, "", 3, 9))
}
func expectListenerJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) map[string]string {
tokenVal := ""
if token != "" {
tokenVal = fmt.Sprintf(",\n"+`"value": "%s"`, token)
}
return map[string]string{
"public_listener": `{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"tlsContext": ` + expectedPublicTLSContextJSON(t, snap) + `,
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token"
` + tokenVal + `
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "local_app",
"stat_prefix": "public_listener"
}
}
]
}
]
}`,
"service:db": `{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "service:db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "service:db",
"stat_prefix": "service:db"
}
}
]
}
]
}`,
"prepared_query:geo-cache": `{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "prepared_query:geo-cache"
}
}
]
}
]
}`,
}
}
func expectListenerJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64, resourcesJSON map[string]string) string {
resJSON := ""
// Sort resources into specific order because that matters in JSONEq
// comparison later.
keyOrder := []string{"public_listener"}
for _, u := range snap.Proxy.Upstreams {
keyOrder = append(keyOrder, u.Identifier())
}
for _, k := range keyOrder {
j, ok := resourcesJSON[k]
if !ok {
continue
}
if resJSON != "" {
resJSON += ",\n"
}
resJSON += j
}
return `{
"versionInfo": "` + hexString(v) + `",
"resources": [` + resJSON + `],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "` + hexString(n) + `"
}`
}
func expectListenerJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
return expectListenerJSONFromResources(t, snap, token, v, n,
expectListenerJSONResources(t, snap, token, v, n))
}
func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) map[string]string {
return map[string]string{
"local_app": `
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "local_app",
"connectTimeout": "5s",
"hosts": [
{
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
]
}`,
"service:db": `
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "service:db",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"outlierDetection": {
},
"connectTimeout": "1s",
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap) + `
}`,
"prepared_query:geo-cache": `
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "prepared_query:geo-cache",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"outlierDetection": {
},
"connectTimeout": "5s",
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap) + `
}`,
}
}
func expectClustersJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64, resourcesJSON map[string]string) string {
resJSON := ""
// Sort resources into specific order because that matters in JSONEq
// comparison later.
keyOrder := []string{"local_app"}
for _, u := range snap.Proxy.Upstreams {
keyOrder = append(keyOrder, u.Identifier())
}
for _, k := range keyOrder {
j, ok := resourcesJSON[k]
if !ok {
continue
}
if resJSON != "" {
resJSON += ",\n"
}
resJSON += j
}
return `{
"versionInfo": "` + hexString(v) + `",
"resources": [` + resJSON + `],
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
"nonce": "` + hexString(n) + `"
}`
}
func expectClustersJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
return expectClustersJSONFromResources(t, snap, token, v, n,
expectClustersJSONResources(t, snap, token, v, n))
}
func expectEndpointsJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
return `{
"versionInfo": "` + hexString(v) + `",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
"clusterName": "service:db",
"clusterName": "db",
"endpoints": [
{
"lbEndpoints": [
@ -558,11 +339,7 @@ func assertResponseSent(t *testing.T, ch chan *envoy.DiscoveryResponse, wantJSON
// expected structs require the same code that is under test!
func assertResponse(t *testing.T, r *envoy.DiscoveryResponse, wantJSON string) {
t.Helper()
m := jsonpb.Marshaler{
Indent: " ",
}
gotJSON, err := m.MarshalToString(r)
require.NoError(t, err)
gotJSON := responseToJSON(t, r)
require.JSONEqf(t, wantJSON, gotJSON, "got:\n%s", gotJSON)
}
@ -591,21 +368,21 @@ func TestServer_StreamAggregatedResources_ACLEnforcement(t *testing.T) {
wantDenied: true,
},
{
name: "default deny, service:write token",
name: "default deny, write token",
defaultDeny: true,
acl: `service "web" { policy = "write" }`,
token: "service-write-on-web",
wantDenied: false,
},
{
name: "default deny, service:read token",
name: "default deny, read token",
defaultDeny: true,
acl: `service "web" { policy = "read" }`,
token: "service-write-on-web",
wantDenied: true,
},
{
name: "default deny, service:write token on different service",
name: "default deny, write token on different service",
defaultDeny: true,
acl: `service "not-web" { policy = "write" }`,
token: "service-write-on-not-web",
@ -1012,394 +789,3 @@ func TestServer_Check(t *testing.T) {
})
}
}
func TestServer_ConfigOverridesListeners(t *testing.T) {
tests := []struct {
name string
setup func(snap *proxycfg.ConfigSnapshot) string
}{
{
name: "sanity check no custom",
setup: func(snap *proxycfg.ConfigSnapshot) string {
// Default snap and expectation
return expectListenerJSON(t, snap, "my-token", 1, 1)
},
},
{
name: "custom public_listener no type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Config["envoy_public_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
IncludeType: false,
})
resources := expectListenerJSONResources(t, snap, "my-token", 1, 1)
// Replace the public listener with the custom one WITH type since
// that's how it comes out the other end, and with TLS and authz
// overridden.
resources["public_listener"] = customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
// We should add type, TLS and authz
IncludeType: true,
OverrideAuthz: true,
TLSContext: expectedPublicTLSContextJSON(t, snap),
})
return expectListenerJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom public_listener with type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Config["envoy_public_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
IncludeType: true,
})
resources := expectListenerJSONResources(t, snap, "my-token", 1, 1)
// Replace the public listener with the custom one WITH type since
// that's how it comes out the other end, and with TLS and authz
// overridden.
resources["public_listener"] = customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
// We should add type, TLS and authz
IncludeType: true,
OverrideAuthz: true,
TLSContext: expectedPublicTLSContextJSON(t, snap),
})
return expectListenerJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom public_listener with TLS should be overridden",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Config["envoy_public_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
IncludeType: true,
TLSContext: `{"requireClientCertificate": false}`,
})
resources := expectListenerJSONResources(t, snap, "my-token", 1, 1)
// Replace the public listener with the custom one WITH type since
// that's how it comes out the other end, and with TLS and authz
// overridden.
resources["public_listener"] = customListenerJSON(t, customListenerJSONOptions{
Name: "custom-public-listen",
// We should add type, TLS and authz
IncludeType: true,
OverrideAuthz: true,
TLSContext: expectedPublicTLSContextJSON(t, snap),
})
return expectListenerJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom upstream no type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Upstreams[0].Config["envoy_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-upstream",
IncludeType: false,
})
resources := expectListenerJSONResources(t, snap, "my-token", 1, 1)
// Replace an upstream listener with the custom one WITH type since
// that's how it comes out the other end. Note we do not override TLS
// for upstream listeners.
resources["service:db"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-upstream",
// We should add type
IncludeType: true,
})
return expectListenerJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom upstream with type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Upstreams[0].Config["envoy_listener_json"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-upstream",
IncludeType: true,
})
resources := expectListenerJSONResources(t, snap, "my-token", 1, 1)
// Replace an upstream listener with the custom one WITH type since
// that's how it comes out the other end.
resources["service:db"] =
customListenerJSON(t, customListenerJSONOptions{
Name: "custom-upstream",
// We should add type
IncludeType: true,
})
return expectListenerJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require := require.New(t)
// Sanity check default with no overrides first
snap := proxycfg.TestConfigSnapshot(t)
expect := tt.setup(snap)
listeners, err := listenersFromSnapshot(snap, "my-token")
require.NoError(err)
r, err := createResponse(ListenerType, "00000001", "00000001", listeners)
require.NoError(err)
assertResponse(t, r, expect)
})
}
}
func TestServer_ConfigOverridesClusters(t *testing.T) {
tests := []struct {
name string
setup func(snap *proxycfg.ConfigSnapshot) string
}{
{
name: "sanity check no custom",
setup: func(snap *proxycfg.ConfigSnapshot) string {
// Default snap and expectation
return expectClustersJSON(t, snap, "my-token", 1, 1)
},
},
{
name: "custom public with no type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Config["envoy_local_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
IncludeType: false,
})
resources := expectClustersJSONResources(t, snap, "my-token", 1, 1)
// Replace the local app cluster with the custom one WITH type since
// that's how it comes out the other end.
resources["local_app"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
IncludeType: true,
})
return expectClustersJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom public with type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Config["envoy_local_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
IncludeType: true,
})
resources := expectClustersJSONResources(t, snap, "my-token", 1, 1)
// Replace the local app cluster with the custom one WITH type since
// that's how it comes out the other end.
resources["local_app"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
IncludeType: true,
})
return expectClustersJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom upstream with no type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customEDSClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: false,
})
resources := expectClustersJSONResources(t, snap, "my-token", 1, 1)
// Replace the upstream cluster with the custom one WITH type and the
// expected TLS context since that's how it comes out the other end.
resources["service:db"] =
customEDSClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: true,
TLSContext: expectedUpstreamTLSContextJSON(t, snap),
})
return expectClustersJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
{
name: "custom upstream with type",
setup: func(snap *proxycfg.ConfigSnapshot) string {
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customEDSClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: true,
})
resources := expectClustersJSONResources(t, snap, "my-token", 1, 1)
// Replace the upstream cluster with the custom one WITH type and the
// expected TLS context since that's how it comes out the other end.
resources["service:db"] =
customEDSClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
IncludeType: true,
TLSContext: expectedUpstreamTLSContextJSON(t, snap),
})
return expectClustersJSONFromResources(t, snap, "my-token", 1, 1, resources)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require := require.New(t)
// Sanity check default with no overrides first
snap := proxycfg.TestConfigSnapshot(t)
expect := tt.setup(snap)
clusters, err := clustersFromSnapshot(snap, "my-token")
require.NoError(err)
r, err := createResponse(ClusterType, "00000001", "00000001", clusters)
require.NoError(err)
assertResponse(t, r, expect)
})
}
}
type customListenerJSONOptions struct {
Name string
IncludeType bool
OverrideAuthz bool
TLSContext string
}
const customListenerJSONTpl = `{
{{ if .IncludeType -}}
"@type": "type.googleapis.com/envoy.api.v2.Listener",
{{- end }}
"name": "{{ .Name }}",
"address": {
"socketAddress": {
"address": "11.11.11.11",
"portValue": 11111
}
},
"filterChains": [
{
{{ if .TLSContext -}}
"tlsContext": {{ .TLSContext }},
{{- end }}
"filters": [
{{ if .OverrideAuthz -}}
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{{- end }}
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "random-cluster",
"stat_prefix": "foo-stats"
}
}
]
}
]
}`
var customListenerJSONTemplate = template.Must(template.New("").Parse(customListenerJSONTpl))
func customListenerJSON(t *testing.T, opts customListenerJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customListenerJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}
type customClusterJSONOptions struct {
Name string
IncludeType bool
TLSContext string
}
var customEDSClusterJSONTpl = `{
{{ if .IncludeType -}}
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
{{- end }}
{{ if .TLSContext -}}
"tlsContext": {{ .TLSContext }},
{{- end }}
"name": "{{ .Name }}",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "5s"
}`
var customEDSClusterJSONTemplate = template.Must(template.New("").Parse(customEDSClusterJSONTpl))
func customEDSClusterJSON(t *testing.T, opts customClusterJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customEDSClusterJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}
var customAppClusterJSONTpl = `{
{{ if .IncludeType -}}
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
{{- end }}
{{ if .TLSContext -}}
"tlsContext": {{ .TLSContext }},
{{- end }}
"name": "{{ .Name }}",
"connectTimeout": "5s",
"hosts": [
{
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
]
}`
var customAppClusterJSONTemplate = template.Must(template.New("").Parse(customAppClusterJSONTpl))
func customAppClusterJSON(t *testing.T, opts customClusterJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customAppClusterJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}

View File

@ -0,0 +1,96 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "mylocal",
"connectTimeout": "5s",
"hosts": [
{
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "db",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "1s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "prepared_query:geo-cache",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "5s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
"nonce": "00000001"
}

View File

@ -0,0 +1,107 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "local_app",
"connectTimeout": "1.234s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "db",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "2.345s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "prepared_query:geo-cache",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "5s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
"nonce": "00000001"
}

View File

@ -0,0 +1,104 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "local_app",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "myservice",
"connectTimeout": "5s",
"hosts": [
{
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
],
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "prepared_query:geo-cache",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "5s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
"nonce": "00000001"
}

View File

@ -0,0 +1,107 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "local_app",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "db",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "1s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
},
{
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
"name": "prepared_query:geo-cache",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
}
}
},
"connectTimeout": "5s",
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
}
},
"outlierDetection": {
}
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
"nonce": "00000001"
}

View File

@ -0,0 +1,116 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "custom-public-listen",
"address": {
"socketAddress": {
"address": "11.11.11.11",
"portValue": 11111
}
},
"filterChains": [
{
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
},
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "random-cluster",
"stat_prefix": "foo-stats"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "db",
"stat_prefix": "upstream_db_tcp"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
}
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "00000001"
}

View File

@ -0,0 +1,116 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
},
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "local_app",
"stat_prefix": "public_listener_tcp"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "custom-upstream",
"address": {
"socketAddress": {
"address": "11.11.11.11",
"portValue": 11111
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "random-cluster",
"stat_prefix": "foo-stats"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
}
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "00000001"
}

View File

@ -0,0 +1,116 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
},
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "local_app",
"stat_prefix": "public_listener_tcp"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "db",
"stat_prefix": "upstream_db_tcp"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
}
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "00000001"
}

View File

@ -0,0 +1,145 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
},
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.http_connection_manager",
"config": {
"http_filters": [
{
"name": "envoy.router"
}
],
"route_config": {
"name": "public_listener",
"virtual_hosts": [
{
"domains": [
"*"
],
"name": "public_listener",
"routes": [
{
"match": {
"prefix": "/"
},
"route": {
"cluster": "local_app"
}
}
]
}
]
},
"stat_prefix": "public_listener_http",
"tracing": {
"random_sampling": {
}
}
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "db",
"stat_prefix": "upstream_db_tcp"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
}
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "00000001"
}

View File

@ -0,0 +1,146 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"tlsContext": {
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
},
"filters": [
{
"name": "envoy.ext_authz",
"config": {
"grpc_service": {
"envoy_grpc": {
"cluster_name": "local_agent"
},
"initial_metadata": [
{
"key": "x-consul-token",
"value": "my-token"
}
]
},
"stat_prefix": "connect_authz"
}
},
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "local_app",
"stat_prefix": "public_listener_tcp"
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.http_connection_manager",
"config": {
"http_filters": [
{
"name": "envoy.router"
}
],
"route_config": {
"name": "db",
"virtual_hosts": [
{
"domains": [
"*"
],
"name": "db",
"routes": [
{
"match": {
"prefix": "/"
},
"route": {
"cluster": "db"
}
}
]
}
]
},
"stat_prefix": "upstream_db_http",
"tracing": {
"operation_name": "EGRESS",
"random_sampling": {
}
}
}
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.api.v2.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.tcp_proxy",
"config": {
"cluster": "prepared_query:geo-cache",
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
}
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
"nonce": "00000001"
}

View File

@ -0,0 +1,16 @@
-----BEGIN CERTIFICATE-----
MIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ
VGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG
A1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR
AB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7
SkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD
AgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6
NDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6
NWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf
ZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6
ZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw
WQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1
NTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG
SM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA
pY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=
-----END CERTIFICATE-----

View File

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49
AwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav
q5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==
-----END EC PRIVATE KEY-----

View File

@ -0,0 +1,15 @@
-----BEGIN CERTIFICATE-----
MIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ
VGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG
A1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx
AsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2
ThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD
AQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi
ZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3
Mzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi
MjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1
ZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x
MTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG
SM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA
oR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=
-----END CERTIFICATE-----

View File

@ -0,0 +1,407 @@
package envoy
import (
"bytes"
"fmt"
"net"
"net/url"
"os"
"strings"
"text/template"
)
// BootstrapConfig is the set of keys we care about in a Connect.Proxy.Config
// map. Note that this only includes config keys that affect Envoy bootstrap
// generation. For Envoy config keys that affect runtime xDS behavior see
// agent/xds/config.go.
type BootstrapConfig struct {
// StatsdURL allows simple configuration of the statsd metrics sink. If
// tagging is required, use DogstatsdURL instead. The URL must be in one of
// the following forms:
// - udp://<ip>:<port>
// - $ENV_VAR_NAME in this case the ENV var named will have its
// value taken and is expected to contain a URL in
// one of the supported forms above.
StatsdURL string `mapstructure:"envoy_statsd_url"`
// DogstatsdURL allows simple configuration of the dogstatsd metrics sink
// which allows tags and Unix domain sockets. The URL must be in one of the
// following forms:
// - udp://<ip>:<port>
// - unix:///full/path/to/unix.sock
// - $ENV_VAR_NAME in this case the ENV var named will have its
// value taken and is expected to contain a URL in
// one of the supported forms above.
DogstatsdURL string `mapstructure:"envoy_dogstatsd_url"`
// StatsTags is a slice of string values that will be added as tags to
// metrics. They are used to configure
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#envoy-api-msg-config-metrics-v2-statsconfig
// and are added to the basic tags Consul adds by default, like the local_cluster
// name. Only exact values are supported here. Full configuration of
// stats_config.stats_tags can be made by overriding envoy_stats_config_json.
StatsTags []string `mapstructure:"envoy_stats_tags"`
// PrometheusBindAddr configures an <ip>:<port> on which Envoy will listen
// and expose a single /metrics HTTP endpoint for Prometheus to scrape. It
// does this by proxying that URL to the internal admin server's prometheus
// endpoint which allows exposing metrics on the network without the security
// risk of exposing the full admin server API. Any other URL requested will be
// a 404.
//
// Note that as of Envoy 1.9.0, the built in Prometheus endpoint only exports
// counters and gauges but not timing information via histograms. This is
// fixed in 1.10-dev currently in Envoy master. Other changes since 1.9.0 make
// master incompatible with the current release of Consul Connect. This will
// be fixed in a future Consul version as Envoy 1.10 reaches stable release.
PrometheusBindAddr string `mapstructure:"envoy_prometheus_bind_addr"`
// OverrideJSONTpl allows replacing the base template used to render the
// bootstrap. This is an "escape hatch" allowing arbitrary control over the
// proxy's configuration but will take the most effort to maintain and correctly
// configure the aspects that Connect relies upon to work. It's recommended
// that this only be used if necessary, and that it be based on the default
// template in
// https://github.com/hashicorp/consul/blob/master/command/connect/envoy/bootstrap_tpl.go
// for the correct version of Consul and Envoy being used.
OverrideJSONTpl string `mapstructure:"envoy_bootstrap_json_tpl"`
// StaticClustersJSON is a JSON string containing zero or more Cluster
// definitions. They are appended to the "static_resources.clusters" list. A
// single cluster should be given as a plain object; if more than one is to be
// added, they should be separated by commas suitable for direct injection
// into a JSON array.
//
// Note that cluster names should be chosen in such a way that they won't
// collide with service names since we use plain service names as cluster
// names in xDS to make metrics population simpler, and cluster names must be
// unique.
//
// This is mostly intended for providing clusters for tracing or metrics
// services.
//
// See https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/api/v2/cds.proto.
StaticClustersJSON string `mapstructure:"envoy_extra_static_clusters_json"`
// StaticListenersJSON is a JSON string containing zero or more Listener
// definitions. They are appended to the "static_resources.listeners" list. A
// single listener should be given as a plain object; if more than one is to
// be added, they should be separated by commas suitable for direct injection
// into a JSON array.
//
// See https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/api/v2/lds.proto.
StaticListenersJSON string `mapstructure:"envoy_extra_static_listeners_json"`
// StatsSinksJSON is a JSON string containing zero or more StatsSink
// definitions. They are appended to the `stats_sinks` array at the top
// level of the bootstrap config. A single sink should be given as a plain
// object; if more than one is to be added, they should be separated by a
// comma suitable for direct injection into a JSON array.
//
// See
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#config-metrics-v2-statssink.
//
// If this is non-empty, the sinks it defines are appended to any sinks
// generated from StatsdURL or DogstatsdURL rather than replacing them.
StatsSinksJSON string `mapstructure:"envoy_extra_stats_sinks_json"`
// StatsConfigJSON is a JSON string containing an object in the right format
// to be rendered as the body of the `stats_config` field at the top level of
// the bootstrap config. Its format may vary based on the Envoy version used. See
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#envoy-api-msg-config-metrics-v2-statsconfig.
//
// If this is non-empty then it will override anything configured in
// StatsTags.
StatsConfigJSON string `mapstructure:"envoy_stats_config_json"`
// StatsFlushInterval is the time duration between Envoy stats flushes. It is
// in proto3 "duration" string format, for example "1.12s". See
// https://developers.google.com/protocol-buffers/docs/proto3#json and
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/bootstrap/v2/bootstrap.proto#bootstrap
StatsFlushInterval string `mapstructure:"envoy_stats_flush_interval"`
// TracingConfigJSON is a JSON string containing an object in the right format
// to be rendered as the body of the `tracing` field at the top level of
// the bootstrap config. Its format may vary based on the Envoy version used.
// See https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/trace/v2/trace.proto.
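//
// For example, a Zipkin tracer could be configured as (illustrative,
// matching the zipkin integration test later in this change; the "zipkin"
// cluster itself must be supplied via StaticClustersJSON):
//
//   {
//     "http": {
//       "name": "envoy.zipkin",
//       "config": {
//         "collector_cluster": "zipkin",
//         "collector_endpoint": "/api/v1/spans"
//       }
//     }
//   }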
TracingConfigJSON string `mapstructure:"envoy_tracing_json"`
}
// Template returns the bootstrap template to use as a base.
func (c *BootstrapConfig) Template() string {
if c.OverrideJSONTpl != "" {
return c.OverrideJSONTpl
}
return bootstrapTemplate
}
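// GenerateJSON applies this config to the given template args (ConfigureArgs)
// and then renders the selected bootstrap template with them. A minimal usage
// sketch, mirroring how the connect envoy command uses it later in this
// change:
//
//   var bsCfg BootstrapConfig
//   if err := mapstructure.WeakDecode(svc.Proxy.Config, &bsCfg); err != nil {
//       return nil, fmt.Errorf("failed parsing Proxy.Config: %s", err)
//   }
//   bootstrapJSON, err := bsCfg.GenerateJSON(args)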
func (c *BootstrapConfig) GenerateJSON(args *BootstrapTplArgs) ([]byte, error) {
if err := c.ConfigureArgs(args); err != nil {
return nil, err
}
t, err := template.New("bootstrap").Parse(c.Template())
if err != nil {
return nil, err
}
var buf bytes.Buffer
err = t.Execute(&buf, args)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// ConfigureArgs takes the basic template arguments generated from the command
// arguments and environment and modifies them according to the BootstrapConfig.
func (c *BootstrapConfig) ConfigureArgs(args *BootstrapTplArgs) error {
// Attempt to setup sink(s) from high-level config. Note the args are passed
// by ref and modified in place.
if err := c.generateStatsSinks(args); err != nil {
return err
}
if c.StatsConfigJSON != "" {
// StatsConfig overridden explicitly
args.StatsConfigJSON = c.StatsConfigJSON
} else {
// Attempt to setup tags from high-level config. Note the args are passed by
// ref and modified in place.
if err := c.generateStatsConfig(args); err != nil {
return err
}
}
if c.StaticClustersJSON != "" {
args.StaticClustersJSON = c.StaticClustersJSON
}
if c.StaticListenersJSON != "" {
args.StaticListenersJSON = c.StaticListenersJSON
}
// Setup prometheus if needed. This MUST happen after the Static*JSON is set above
if c.PrometheusBindAddr != "" {
if err := c.generatePrometheusConfig(args); err != nil {
return err
}
}
if c.TracingConfigJSON != "" {
args.TracingConfigJSON = c.TracingConfigJSON
}
if c.StatsFlushInterval != "" {
args.StatsFlushInterval = c.StatsFlushInterval
}
return nil
}
func (c *BootstrapConfig) generateStatsSinks(args *BootstrapTplArgs) error {
var stats_sinks []string
if c.StatsdURL != "" {
sinkJSON, err := c.generateStatsSinkJSON("envoy.statsd", c.StatsdURL)
if err != nil {
return err
}
stats_sinks = append(stats_sinks, sinkJSON)
}
if c.DogstatsdURL != "" {
sinkJSON, err := c.generateStatsSinkJSON("envoy.dog_statsd", c.DogstatsdURL)
if err != nil {
return err
}
stats_sinks = append(stats_sinks, sinkJSON)
}
if c.StatsSinksJSON != "" {
stats_sinks = append(stats_sinks, c.StatsSinksJSON)
}
if len(stats_sinks) > 0 {
args.StatsSinksJSON = "[\n" + strings.Join(stats_sinks, ",\n") + "\n]"
}
return nil
}
func (c *BootstrapConfig) generateStatsSinkJSON(name string, addr string) (string, error) {
// Resolve address ENV var
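// e.g. the URL may be given as "$MY_STATSD_URL" so the concrete address is
// read from that environment variable at bootstrap time (see the
// "simple-statsd-sink-env" test case below).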
if len(addr) > 2 && addr[0] == '$' {
addr = os.Getenv(addr[1:])
}
u, err := url.Parse(addr)
if err != nil {
return "", fmt.Errorf("failed to parse %s sink address %q", name, addr)
}
var addrJSON string
switch u.Scheme {
case "udp":
addrJSON = `
"socket_address": {
"address": "` + u.Hostname() + `",
"port_value": ` + u.Port() + `
}
`
case "unix":
addrJSON = `
"pipe": {
"path": "` + u.Path + `"
}
`
default:
return "", fmt.Errorf("unsupported address protocol %q for %s sink",
u.Scheme, name)
}
return `{
"name": "` + name + `",
"config": {
"address": {
` + addrJSON + `
}
}
}`, nil
}
func (c *BootstrapConfig) generateStatsConfig(args *BootstrapTplArgs) error {
var tagJSONs []string
// Add some default tags if not already overridden
defaults := map[string]string{
"local_cluster": args.ProxyCluster,
}
for _, tag := range c.StatsTags {
parts := strings.SplitN(tag, "=", 2)
// If there is no equals sign, treat it as a boolean tag and just assign a
// value of 1, e.g. "canary" will output the tag "canary: 1"
v := "1"
if len(parts) == 2 {
v = parts[1]
}
k := strings.ToLower(parts[0])
tagJSON := `{
"tag_name": "` + k + `",
"fixed_value": "` + v + `"
}`
tagJSONs = append(tagJSONs, tagJSON)
// Remove this in case we override a default
delete(defaults, k)
}
for k, v := range defaults {
if v == "" {
// Skip stuff we just didn't have data for; this is only really the case
// in tests currently.
continue
}
tagJSON := `{
"tag_name": "` + k + `",
"fixed_value": "` + v + `"
}`
tagJSONs = append(tagJSONs, tagJSON)
}
if len(tagJSONs) > 0 {
// use_all_default_tags is true by default but we'll make it explicit!
args.StatsConfigJSON = `{
"stats_tags": [
` + strings.Join(tagJSONs, ",\n") + `
],
"use_all_default_tags": true
}`
}
return nil
}
func (c *BootstrapConfig) generatePrometheusConfig(args *BootstrapTplArgs) error {
host, port, err := net.SplitHostPort(c.PrometheusBindAddr)
if err != nil {
return fmt.Errorf("invalid prometheus_bind_addr: %s", err)
}
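// The cluster below points back at Envoy's own admin API, and the listener
// serves GET /metrics on PrometheusBindAddr by rewriting it to
// /stats/prometheus on that cluster; any other path returns a 404.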
clusterJSON := `{
"name": "self_admin",
"connect_timeout": "5s",
"type": "STATIC",
"http_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": ` + args.AdminBindPort + `
}
}
]
}`
listenerJSON := `{
"name": "envoy_prometheus_metrics_listener",
"address": {
"socket_address": {
"address": "` + host + `",
"port_value": ` + port + `
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.http_connection_manager",
"config": {
"stat_prefix": "envoy_prometheus_metrics",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
"virtual_hosts": [
{
"name": "self_admin",
"domains": [
"*"
],
"routes": [
{
"match": {
"path": "/metrics"
},
"route": {
"cluster": "self_admin",
"prefix_rewrite": "/stats/prometheus"
}
},
{
"match": {
"prefix": "/"
},
"direct_response": {
"status": 404
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.router"
}
]
}
}
]
}
]
}`
if args.StaticClustersJSON != "" {
clusterJSON = ",\n" + clusterJSON
}
args.StaticClustersJSON += clusterJSON
if args.StaticListenersJSON != "" {
listenerJSON = ",\n" + listenerJSON
}
args.StaticListenersJSON += listenerJSON
return nil
}

View File

@ -0,0 +1,410 @@
package envoy
import (
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
const (
expectedPromListener = `{
"name": "envoy_prometheus_metrics_listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9000
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.http_connection_manager",
"config": {
"stat_prefix": "envoy_prometheus_metrics",
"codec_type": "HTTP1",
"route_config": {
"name": "self_admin_route",
"virtual_hosts": [
{
"name": "self_admin",
"domains": [
"*"
],
"routes": [
{
"match": {
"path": "/metrics"
},
"route": {
"cluster": "self_admin",
"prefix_rewrite": "/stats/prometheus"
}
},
{
"match": {
"prefix": "/"
},
"direct_response": {
"status": 404
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.router"
}
]
}
}
]
}
]
}`
expectedPromCluster = `{
"name": "self_admin",
"connect_timeout": "5s",
"type": "STATIC",
"http_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
]
}`
)
func TestBootstrapConfig_ConfigureArgs(t *testing.T) {
tests := []struct {
name string
input BootstrapConfig
env []string
baseArgs BootstrapTplArgs
wantArgs BootstrapTplArgs
wantErr bool
}{
{
name: "defaults",
input: BootstrapConfig{},
wantArgs: BootstrapTplArgs{},
wantErr: false,
},
{
name: "extra-stats-sinks",
input: BootstrapConfig{
StatsSinksJSON: `{
"name": "envoy.custom_exciting_sink",
"config": {
"foo": "bar"
}
}`,
},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.custom_exciting_sink",
"config": {
"foo": "bar"
}
}]`,
},
},
{
name: "simple-statsd-sink",
input: BootstrapConfig{
StatsdURL: "udp://127.0.0.1:9125",
},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.statsd",
"config": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9125
}
}
}
}]`,
},
wantErr: false,
},
{
name: "simple-statsd-sink-plus-extra",
input: BootstrapConfig{
StatsdURL: "udp://127.0.0.1:9125",
StatsSinksJSON: `{
"name": "envoy.custom_exciting_sink",
"config": {
"foo": "bar"
}
}`,
},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.statsd",
"config": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9125
}
}
}
},
{
"name": "envoy.custom_exciting_sink",
"config": {
"foo": "bar"
}
}]`,
},
wantErr: false,
},
{
name: "simple-statsd-sink-env",
input: BootstrapConfig{
StatsdURL: "$MY_STATSD_URL",
},
env: []string{"MY_STATSD_URL=udp://127.0.0.1:9125"},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.statsd",
"config": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9125
}
}
}
}]`,
},
wantErr: false,
},
{
name: "simple-dogstatsd-sink",
input: BootstrapConfig{
DogstatsdURL: "udp://127.0.0.1:9125",
},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.dog_statsd",
"config": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9125
}
}
}
}]`,
},
wantErr: false,
},
{
name: "simple-dogstatsd-unix-sink",
input: BootstrapConfig{
DogstatsdURL: "unix:///var/run/dogstatsd.sock",
},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.dog_statsd",
"config": {
"address": {
"pipe": {
"path": "/var/run/dogstatsd.sock"
}
}
}
}]`,
},
wantErr: false,
},
{
name: "simple-dogstatsd-sink-env",
input: BootstrapConfig{
DogstatsdURL: "$MY_STATSD_URL",
},
env: []string{"MY_STATSD_URL=udp://127.0.0.1:9125"},
wantArgs: BootstrapTplArgs{
StatsSinksJSON: `[{
"name": "envoy.dog_statsd",
"config": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9125
}
}
}
}]`,
},
wantErr: false,
},
{
name: "stats-config-override",
input: BootstrapConfig{
StatsConfigJSON: `{
"use_all_default_tags": true
}`,
},
wantArgs: BootstrapTplArgs{
StatsConfigJSON: `{
"use_all_default_tags": true
}`,
},
wantErr: false,
},
{
name: "simple-tags",
input: BootstrapConfig{
StatsTags: []string{"canary", "foo=bar", "baz=2"},
},
wantArgs: BootstrapTplArgs{
StatsConfigJSON: `{
"stats_tags": [
{
"tag_name": "canary",
"fixed_value": "1"
},
{
"tag_name": "foo",
"fixed_value": "bar"
},
{
"tag_name": "baz",
"fixed_value": "2"
}
],
"use_all_default_tags": true
}`,
},
wantErr: false,
},
{
name: "prometheus-bind-addr",
input: BootstrapConfig{
PrometheusBindAddr: "0.0.0.0:9000",
},
baseArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
},
wantArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
// Should add a static cluster for the self-proxy to admin
StaticClustersJSON: expectedPromCluster,
// Should add a static http listener too
StaticListenersJSON: expectedPromListener,
},
wantErr: false,
},
{
name: "prometheus-bind-addr-with-overrides",
input: BootstrapConfig{
PrometheusBindAddr: "0.0.0.0:9000",
StaticClustersJSON: `{"foo":"bar"}`,
StaticListenersJSON: `{"baz":"qux"}`,
},
baseArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
},
wantArgs: BootstrapTplArgs{
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
// Should add a static cluster for the self-proxy to admin
StaticClustersJSON: `{"foo":"bar"},` + expectedPromCluster,
// Should add a static http listener too
StaticListenersJSON: `{"baz":"qux"},` + expectedPromListener,
},
wantErr: false,
},
{
name: "stats-flush-interval",
input: BootstrapConfig{
StatsFlushInterval: `10s`,
},
wantArgs: BootstrapTplArgs{
StatsFlushInterval: `10s`,
},
wantErr: false,
},
{
name: "override-tracing",
input: BootstrapConfig{
TracingConfigJSON: `{"foo": "bar"}`,
},
wantArgs: BootstrapTplArgs{
TracingConfigJSON: `{"foo": "bar"}`,
},
wantErr: false,
},
{
name: "err-bad-prometheus-addr",
input: BootstrapConfig{
PrometheusBindAddr: "asdasdsad",
},
wantErr: true,
},
{
name: "err-bad-statsd-addr",
input: BootstrapConfig{
StatsdURL: "asdasdsad",
},
wantErr: true,
},
{
name: "err-bad-dogstatsd-addr",
input: BootstrapConfig{
DogstatsdURL: "asdasdsad",
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
args := tt.baseArgs
defer testSetAndResetEnv(t, tt.env)()
err := tt.input.ConfigureArgs(&args)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
// Want to compare JSON fields with JSONEq
argV := reflect.ValueOf(args)
wantV := reflect.ValueOf(tt.wantArgs)
argT := reflect.TypeOf(args)
for i := 0; i < argT.NumField(); i++ {
f := argT.Field(i)
if strings.HasSuffix(f.Name, "JSON") && wantV.Field(i).String() != "" {
// Some of our JSON strings are comma-separated objects to be
// inserted into an array, which is not valid JSON on its own, so wrap
// them all in an array. For simple values this is still valid JSON
// too.
want := "[" + wantV.Field(i).String() + "]"
got := "[" + argV.Field(i).String() + "]"
require.JSONEq(t, want, got, "field %s should be equivalent JSON", f.Name)
} else {
require.Equalf(t, wantV.Field(i).Interface(),
argV.Field(i).Interface(), "field %s should be equal", f.Name)
}
}
}
})
}
}

View File

@ -1,15 +1,87 @@
package envoy
type templateArgs struct {
ProxyCluster, ProxyID string
AgentAddress string
AgentPort string
AgentTLS bool
AgentCAFile string
AdminBindAddress string
AdminBindPort string
// BootstrapTplArgs is the set of arguments that may be interpolated into the
// Envoy bootstrap template.
type BootstrapTplArgs struct {
// ProxyCluster is the cluster name for the Envoy `node` specification and
// is typically the same as the ProxyID.
ProxyCluster string
// ProxyID is the ID of the proxy service instance as registered with the
// local Consul agent. This must be used as the Envoy `node.id` in order for
// the agent to deliver the correct configuration.
ProxyID string
// AgentAddress is the IP address of the local agent where the proxy instance
// is registered.
AgentAddress string
// AgentPort is the gRPC port exposed on the local agent.
AgentPort string
// AgentTLS is true if the local agent gRPC service should be accessed over
// TLS.
AgentTLS bool
// AgentCAFile is the CA file to use to verify the local agent gRPC service if
// TLS is enabled.
AgentCAFile string
// AdminBindAddress is the address the Envoy admin server should bind to.
AdminBindAddress string
// AdminBindPort is the port the Envoy admin server should bind to.
AdminBindPort string
// LocalAgentClusterName is the name reserved for the local Consul agent gRPC
// service and is expected to be used for that purpose.
LocalAgentClusterName string
Token string
// Token is the Consul ACL token provided which is required to make gRPC
// discovery requests. If non-empty, this must be configured as the gRPC
// service "initial_metadata" with the key "x-consul-token" in order to
// authorize the discovery streaming RPCs.
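//
// In the rendered bootstrap this appears as, for example (empty in the
// golden files below since no token is set):
//
//   "initial_metadata": [
//     { "key": "x-consul-token", "value": "" }
//   ]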
Token string
// StaticClustersJSON is a JSON string containing zero or more Cluster
// definitions. They are appended to the "static_resources.clusters" list. Note
// that cluster names should be chosen in such a way that they won't collide
// with service names since we use plain service names as cluster names in xDS
// to make metrics population simpler and cluster names must be unique. See
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/api/v2/cds.proto.
StaticClustersJSON string
// StaticListenersJSON is a JSON string containing zero or more Listener
// definitions. They are appended to the "static_resources.listeners" list. A
// single listener should be given as a plain object; if more than one is to
// be added, they should be separated by commas suitable for direct injection
// into a JSON array.
// See https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/api/v2/lds.proto.
StaticListenersJSON string
// StatsSinksJSON is a JSON string containing an array in the right format
// to be rendered as the body of the `stats_sinks` field at the top level of
// the bootstrap config. Its format may vary based on the Envoy version used. See
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#config-metrics-v2-statssink.
StatsSinksJSON string
// StatsConfigJSON is a JSON string containing an object in the right format
// to be rendered as the body of the `stats_config` field at the top level of
// the bootstrap config. Its format may vary based on the Envoy version used. See
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/metrics/v2/stats.proto#envoy-api-msg-config-metrics-v2-statsconfig.
StatsConfigJSON string
// StatsFlushInterval is the time duration between Envoy stats flushes. It is
// in proto3 "duration" string format, for example "1.12s". See
// https://developers.google.com/protocol-buffers/docs/proto3#json and
// https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/bootstrap/v2/bootstrap.proto#bootstrap
StatsFlushInterval string
// TracingConfigJSON is a JSON string containing an object in the right format
// to be rendered as the body of the `tracing` field at the top level of
// the bootstrap config. Its format may vary based on the Envoy version used.
// See https://www.envoyproxy.io/docs/envoy/v1.9.0/api-v2/config/trace/v2/trace.proto.
TracingConfigJSON string
}
const bootstrapTemplate = `{
@ -53,8 +125,29 @@ const bootstrapTemplate = `{
}
]
}
{{- if .StaticClustersJSON -}}
,
{{ .StaticClustersJSON }}
{{- end }}
]{{- if .StaticListenersJSON -}}
,
"listeners": [
{{ .StaticListenersJSON }}
]
{{- end }}
},
{{- if .StatsSinksJSON }}
"stats_sinks": {{ .StatsSinksJSON }},
{{- end }}
{{- if .StatsConfigJSON }}
"stats_config": {{ .StatsConfigJSON }},
{{- end }}
{{- if .StatsFlushInterval }}
"stats_flush_interval": "{{ .StatsFlushInterval }}",
{{- end }}
{{- if .TracingConfigJSON }}
"tracing": {{ .TracingConfigJSON }},
{{- end }}
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },

View File

@ -1,17 +1,17 @@
package envoy
import (
"bytes"
"errors"
"flag"
"fmt"
"html/template"
"net"
"os"
"os/exec"
"strconv"
"strings"
"github.com/mitchellh/mapstructure"
proxyAgent "github.com/hashicorp/consul/agent/proxyprocess"
"github.com/hashicorp/consul/agent/xds"
"github.com/hashicorp/consul/api"
@ -42,12 +42,13 @@ type cmd struct {
client *api.Client
// flags
proxyID string
sidecarFor string
adminBind string
envoyBin string
bootstrap bool
grpcAddr string
proxyID string
sidecarFor string
adminBind string
envoyBin string
bootstrap bool
disableCentralConfig bool
grpcAddr string
}
func (c *cmd) init() {
@ -68,12 +69,19 @@ func (c *cmd) init() {
c.flags.StringVar(&c.adminBind, "admin-bind", "localhost:19000",
"The address:port to start envoy's admin server on. Envoy requires this "+
"but care must be taked to ensure it's not exposed to untrusted network "+
"but care must be taken to ensure it's not exposed to an untrusted network "+
"as it has full control over the secrets and config of the proxy.")
c.flags.BoolVar(&c.bootstrap, "bootstrap", false,
"Generate the bootstrap.json but don't exec envoy")
c.flags.BoolVar(&c.disableCentralConfig, "no-central-config", false,
"By default the proxy's bootstrap configuration can be customized "+
"centrally. This requires that the command run on the same agent as the "+
"proxy will and that the agent is reachable when the command is run. In "+
"cases where either assumption is violated this flag will prevent the "+
"command attempting to resolve config from the local agent.")
c.flags.StringVar(&c.grpcAddr, "grpc-addr", "",
"Set the agent's gRPC address and port (in http(s)://host:port format). "+
"Alternatively, you can specify CONSUL_GRPC_ADDR in ENV.")
@ -179,7 +187,7 @@ func (c *cmd) findBinary() (string, error) {
return exec.LookPath("envoy")
}
func (c *cmd) templateArgs() (*templateArgs, error) {
func (c *cmd) templateArgs() (*BootstrapTplArgs, error) {
httpCfg := api.DefaultConfig()
c.http.MergeOntoConfig(httpCfg)
@ -235,8 +243,18 @@ func (c *cmd) templateArgs() (*templateArgs, error) {
return nil, fmt.Errorf("Failed to resolve admin bind address: %s", err)
}
return &templateArgs{
ProxyCluster: c.proxyID,
// Ideally the cluster should be the service name. We may or may not have that
// yet depending on the arguments used, so make a best effort here. In the
// common case, even if the command was invoked with -proxy-id and we don't
// know the service name yet, we will after we resolve the proxy's config in a
// bit and will update this then.
cluster := c.proxyID
if c.sidecarFor != "" {
cluster = c.sidecarFor
}
return &BootstrapTplArgs{
ProxyCluster: cluster,
ProxyID: c.proxyID,
AgentAddress: agentIP.String(),
AgentPort: agentPort,
@ -254,13 +272,30 @@ func (c *cmd) generateConfig() ([]byte, error) {
if err != nil {
return nil, err
}
var t = template.Must(template.New("bootstrap").Parse(bootstrapTemplate))
var buf bytes.Buffer
err = t.Execute(&buf, args)
if err != nil {
return nil, err
var bsCfg BootstrapConfig
if !c.disableCentralConfig {
// Fetch any customization from the registration
svc, _, err := c.client.Agent().Service(c.proxyID, nil)
if err != nil {
return nil, fmt.Errorf("failed fetch proxy config from local agent: %s", err)
}
if svc.Proxy == nil {
return nil, errors.New("service is not a Connect proxy")
}
// Parse the bootstrap config
if err := mapstructure.WeakDecode(svc.Proxy.Config, &bsCfg); err != nil {
return nil, fmt.Errorf("failed parsing Proxy.Config: %s", err)
}
// Override the cluster now that we know the actual service name
args.ProxyCluster = svc.Proxy.DestinationServiceName
}
return buf.Bytes(), nil
return bsCfg.GenerateJSON(args)
}
func (c *cmd) lookupProxyIDForSidecar() (string, error) {

View File

@ -1,16 +1,21 @@
package envoy
import (
"encoding/json"
"flag"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/hashicorp/consul/agent/xds"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/xds"
"github.com/hashicorp/consul/api"
)
var update = flag.Bool("update", false, "update golden files")
@ -56,11 +61,12 @@ func testSetAndResetEnv(t *testing.T, env []string) func() {
// pass the test of having their template args generated as expected.
func TestGenerateConfig(t *testing.T) {
cases := []struct {
Name string
Flags []string
Env []string
WantArgs templateArgs
WantErr string
Name string
Flags []string
Env []string
ProxyConfig map[string]interface{}
WantArgs BootstrapTplArgs
WantErr string
}{
{
Name: "no-args",
@ -72,7 +78,7 @@ func TestGenerateConfig(t *testing.T) {
Name: "defaults",
Flags: []string{"-proxy-id", "test-proxy"},
Env: []string{},
WantArgs: templateArgs{
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
AgentAddress: "127.0.0.1",
@ -87,7 +93,7 @@ func TestGenerateConfig(t *testing.T) {
Flags: []string{"-proxy-id", "test-proxy",
"-grpc-addr", "localhost:9999"},
Env: []string{},
WantArgs: templateArgs{
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
// Should resolve IP, note this might not resolve the same way
@ -106,7 +112,7 @@ func TestGenerateConfig(t *testing.T) {
Env: []string{
"CONSUL_GRPC_ADDR=localhost:9999",
},
WantArgs: templateArgs{
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
// Should resolve IP, note this might not resolve the same way
@ -119,7 +125,185 @@ func TestGenerateConfig(t *testing.T) {
LocalAgentClusterName: xds.LocalAgentClusterName,
},
},
// TODO(banks): all the flags/env manipulation cases
{
Name: "custom-bootstrap",
Flags: []string{"-proxy-id", "test-proxy"},
Env: []string{},
ProxyConfig: map[string]interface{}{
// Add a completely custom bootstrap template. It doesn't matter if this is
// invalid Envoy config just as long as it renders and the variables get
// interpolated.
"envoy_bootstrap_json_tpl": `
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "{{ .AdminBindAddress }}",
"port_value": {{ .AdminBindPort }}
}
}
},
"node": {
"cluster": "{{ .ProxyCluster }}",
"id": "{{ .ProxyID }}"
},
custom_field = "foo"
}`,
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
AgentAddress: "127.0.0.1",
AgentPort: "8502",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
},
},
{
Name: "extra_-single",
Flags: []string{"-proxy-id", "test-proxy"},
Env: []string{},
ProxyConfig: map[string]interface{}{
// Add custom sections with interpolated variables. These are all
// invalid config syntax too but we are just testing they have the right
// effect.
"envoy_extra_static_clusters_json": `
{
"name": "fake_cluster_1"
}`,
"envoy_extra_static_listeners_json": `
{
"name": "fake_listener_1"
}`,
"envoy_extra_stats_sinks_json": `
{
"name": "fake_sink_1"
}`,
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
AgentAddress: "127.0.0.1",
AgentPort: "8502",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
},
},
{
Name: "extra_-multiple",
Flags: []string{"-proxy-id", "test-proxy"},
Env: []string{},
ProxyConfig: map[string]interface{}{
// Add custom sections with interpolated variables. These are all
// invalid config syntax too but we are just testing they have the right
// effect.
"envoy_extra_static_clusters_json": `
{
"name": "fake_cluster_1"
},
{
"name": "fake_cluster_2"
}`,
"envoy_extra_static_listeners_json": `
{
"name": "fake_listener_1"
},{
"name": "fake_listener_2"
}`,
"envoy_extra_stats_sinks_json": `
{
"name": "fake_sink_1"
} , { "name": "fake_sink_2" }`,
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
AgentAddress: "127.0.0.1",
AgentPort: "8502",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
},
},
{
Name: "stats-config-override",
Flags: []string{"-proxy-id", "test-proxy"},
Env: []string{},
ProxyConfig: map[string]interface{}{
// Add custom sections with interpolated variables. These are all
// invalid config syntax too but we are just testing they have the right
// effect.
"envoy_stats_config_json": `
{
"name": "fake_config"
}`,
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
AgentAddress: "127.0.0.1",
AgentPort: "8502",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
},
},
{
Name: "zipkin-tracing-config",
Flags: []string{"-proxy-id", "test-proxy"},
Env: []string{},
ProxyConfig: map[string]interface{}{
// Add custom sections with interpolated variables. These are all
// invalid config syntax too but we are just testing they have the right
// effect.
"envoy_tracing_json": `{
"http": {
"name": "envoy.zipkin",
"config": {
"collector_cluster": "zipkin",
"collector_endpoint": "/api/v1/spans"
}
}
}`,
// Need to set up the cluster to send traces to as well
"envoy_extra_static_clusters_json": `{
"name": "zipkin",
"type": "STRICT_DNS",
"connect_timeout": "5s",
"load_assignment": {
"cluster_name": "zipkin",
"endpoints": [
{
"lb_endpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "zipkin.service.consul",
"port_value": 9411
}
}
}
}
]
}
]
}
}`,
},
WantArgs: BootstrapTplArgs{
ProxyCluster: "test-proxy",
ProxyID: "test-proxy",
AgentAddress: "127.0.0.1",
AgentPort: "8502",
AdminBindAddress: "127.0.0.1",
AdminBindPort: "19000",
LocalAgentClusterName: xds.LocalAgentClusterName,
},
},
}
for _, tc := range cases {
@ -129,6 +313,14 @@ func TestGenerateConfig(t *testing.T) {
ui := cli.NewMockUi()
c := New(ui)
// Run a mock agent API that just always returns the proxy config in the
// test.
srv := httptest.NewServer(testMockAgentProxyConfig(tc.ProxyConfig))
defer srv.Close()
// Set the agent HTTP address in ENV to be our mock
tc.Env = append(tc.Env, "CONSUL_HTTP_ADDR="+srv.URL)
defer testSetAndResetEnv(t, tc.Env)()
// Run the command
@ -165,3 +357,31 @@ func TestGenerateConfig(t *testing.T) {
})
}
}
func testMockAgentProxyConfig(cfg map[string]interface{}) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Parse the proxy-id from the end of the URL (blindly assuming it's in the
// correct format)
proxyID := strings.TrimPrefix(r.URL.Path, "/v1/agent/service/")
serviceID := strings.TrimSuffix(proxyID, "-sidecar-proxy")
svc := api.AgentService{
Kind: api.ServiceKindConnectProxy,
ID: proxyID,
Service: proxyID,
Proxy: &api.AgentServiceConnectProxyConfig{
DestinationServiceName: serviceID,
DestinationServiceID: serviceID,
Config: cfg,
},
}
cfgJSON, err := json.Marshal(svc)
if err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
return
}
w.Write(cfgJSON)
})
}

View File

@ -0,0 +1,17 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test-proxy",
"id": "test-proxy"
},
custom_field = "foo"
}

View File

@ -30,6 +30,15 @@
}
]
},
"stats_config": {
"stats_tags": [
{
"tag_name": "local_cluster",
"fixed_value": "test-proxy"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },

View File

@ -0,0 +1,81 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test-proxy",
"id": "test-proxy"
},
"static_resources": {
"clusters": [
{
"name": "local_agent",
"connect_timeout": "1s",
"type": "STATIC",
"http2_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": 8502
}
}
]
},
{
"name": "fake_cluster_1"
},
{
"name": "fake_cluster_2"
}
],
"listeners": [
{
"name": "fake_listener_1"
},{
"name": "fake_listener_2"
}
]
},
"stats_sinks": [
{
"name": "fake_sink_1"
} , { "name": "fake_sink_2" }
],
"stats_config": {
"stats_tags": [
{
"tag_name": "local_cluster",
"fixed_value": "test-proxy"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },
"ads_config": {
"api_type": "GRPC",
"grpc_services": {
"initial_metadata": [
{
"key": "x-consul-token",
"value": ""
}
],
"envoy_grpc": {
"cluster_name": "local_agent"
}
}
}
}
}

View File

@ -0,0 +1,76 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test-proxy",
"id": "test-proxy"
},
"static_resources": {
"clusters": [
{
"name": "local_agent",
"connect_timeout": "1s",
"type": "STATIC",
"http2_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": 8502
}
}
]
},
{
"name": "fake_cluster_1"
}
],
"listeners": [
{
"name": "fake_listener_1"
}
]
},
"stats_sinks": [
{
"name": "fake_sink_1"
}
],
"stats_config": {
"stats_tags": [
{
"tag_name": "local_cluster",
"fixed_value": "test-proxy"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },
"ads_config": {
"api_type": "GRPC",
"grpc_services": {
"initial_metadata": [
{
"key": "x-consul-token",
"value": ""
}
],
"envoy_grpc": {
"cluster_name": "local_agent"
}
}
}
}
}

View File

@ -30,6 +30,15 @@
}
]
},
"stats_config": {
"stats_tags": [
{
"tag_name": "local_cluster",
"fixed_value": "test-proxy"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },

View File

@ -30,6 +30,15 @@
}
]
},
"stats_config": {
"stats_tags": [
{
"tag_name": "local_cluster",
"fixed_value": "test-proxy"
}
],
"use_all_default_tags": true
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },

View File

@ -0,0 +1,55 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test-proxy",
"id": "test-proxy"
},
"static_resources": {
"clusters": [
{
"name": "local_agent",
"connect_timeout": "1s",
"type": "STATIC",
"http2_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": 8502
}
}
]
}
]
},
"stats_config":
{
"name": "fake_config"
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },
"ads_config": {
"api_type": "GRPC",
"grpc_services": {
"initial_metadata": [
{
"key": "x-consul-token",
"value": ""
}
],
"envoy_grpc": {
"cluster_name": "local_agent"
}
}
}
}
}

View File

@ -0,0 +1,93 @@
{
"admin": {
"access_log_path": "/dev/null",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 19000
}
}
},
"node": {
"cluster": "test-proxy",
"id": "test-proxy"
},
"static_resources": {
"clusters": [
{
"name": "local_agent",
"connect_timeout": "1s",
"type": "STATIC",
"http2_protocol_options": {},
"hosts": [
{
"socket_address": {
"address": "127.0.0.1",
"port_value": 8502
}
}
]
},
{
"name": "zipkin",
"type": "STRICT_DNS",
"connect_timeout": "5s",
"load_assignment": {
"cluster_name": "zipkin",
"endpoints": [
{
"lb_endpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "zipkin.service.consul",
"port_value": 9411
}
}
}
}
]
}
]
}
}
]
},
"stats_config": {
"stats_tags": [
{
"tag_name": "local_cluster",
"fixed_value": "test-proxy"
}
],
"use_all_default_tags": true
},
"tracing": {
"http": {
"name": "envoy.zipkin",
"config": {
"collector_cluster": "zipkin",
"collector_endpoint": "/api/v1/spans"
}
}
},
"dynamic_resources": {
"lds_config": { "ads": {} },
"cds_config": { "ads": {} },
"ads_config": {
"api_type": "GRPC",
"grpc_services": {
"initial_metadata": [
{
"key": "x-consul-token",
"value": ""
}
],
"envoy_grpc": {
"cluster_name": "local_agent"
}
}
}
}
}

go.mod
View File

@ -16,7 +16,7 @@ require (
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/SAP/go-hdb v0.12.0 // indirect
github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc // indirect
github.com/StackExchange/wmi v0.0.0-20160811214555-e54cbda6595d // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310
@ -39,7 +39,7 @@ require (
github.com/envoyproxy/go-control-plane v0.0.0-20180919002855-2137d9196328
github.com/fatih/structs v0.0.0-20180123065059-ebf56d35bba7 // indirect
github.com/go-ldap/ldap v3.0.2+incompatible // indirect
github.com/go-ole/go-ole v0.0.0-20170601135611-02d3668a0cf0 // indirect
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4 // indirect
github.com/go-test/deep v1.0.1 // indirect
github.com/gocql/gocql v0.0.0-20180617115710-e06f8c1bcd78 // indirect
@ -96,6 +96,7 @@ require (
github.com/mitchellh/mapstructure v1.1.2
github.com/mitchellh/reflectwalk v0.0.0-20170726202117-63d60e9d0dbc
github.com/oklog/run v0.0.0-20180308005104-6934b124db28 // indirect
github.com/onsi/gomega v1.4.2 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
@ -114,10 +115,10 @@ require (
github.com/spf13/pflag v1.0.3 // indirect
github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.3.0
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c
golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect

go.sum
View File

@ -21,8 +21,8 @@ github.com/SAP/go-hdb v0.12.0 h1:5hBQZ2jjyZ268qjDmoDZJuCyLzR6oRLI60eYzmTW9m4=
github.com/SAP/go-hdb v0.12.0/go.mod h1:etBT+FAi1t5k3K3tf5vQTnosgYmhDkRi8jEnQqCnxF0=
github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc h1:LkkwnbY+S8WmwkWq1SVyRWMH9nYWO1P5XN3OD1tts/w=
github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc/go.mod h1:ARgCUhI1MHQH+ONky/PAtmVHQrP5JlGY0F3poXOp/fA=
github.com/StackExchange/wmi v0.0.0-20160811214555-e54cbda6595d h1:L9ZGqHf7d+ZeQDjVT/qv9WukazHxT+folVGy/xgaIc8=
github.com/StackExchange/wmi v0.0.0-20160811214555-e54cbda6595d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
@ -91,8 +91,8 @@ github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g=
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
github.com/go-ole/go-ole v0.0.0-20170601135611-02d3668a0cf0 h1:2GWhNNlPnb5RHKNpnYXabh75bic5xflcnWslARlSGMA=
github.com/go-ole/go-ole v0.0.0-20170601135611-02d3668a0cf0/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4 h1:1LlmVz15APoKz9dnm5j2ePptburJlwEH+/v/pUuoxck=
github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
@ -106,6 +106,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -221,6 +222,7 @@ github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpR
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b h1:VE6r2OwP5gj+Z9aCkSKl3MlmnZbfMAjhvR5T7abKHEo=
github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -268,6 +270,8 @@ github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.1 h1:PZSj/UFNaVp3KxrzHOcS7oyuWA7LoOY/77yCTEFu21U=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@ -337,12 +341,18 @@ github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo
github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b h1:/zjbcJPEGAyu6Is/VBOALsgdi4z9+kz/Vtdm6S+beD0=
golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -352,8 +362,12 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=

View File

@ -0,0 +1,8 @@
FROM fortio/fortio AS fortio
FROM bats/bats:latest
RUN apk add curl openssl jq
COPY --from=fortio /usr/bin/fortio /usr/sbin/fortio

View File

@ -0,0 +1,7 @@
# Note this arg has to be before the first FROM
ARG ENVOY_VERSION
FROM consul-dev as consul
FROM envoyproxy/envoy:v${ENVOY_VERSION}
COPY --from=consul /bin/consul /bin/consul

View File

@ -0,0 +1,17 @@
#!/bin/bash
set -euo pipefail
# Setup deny intention
docker_consul intention create -deny s1 s2
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

View File

@ -0,0 +1,6 @@
#!/bin/bash
set -euo pipefail
# Remove deny intention
docker_consul intention delete s1 s2

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should NOT be able to connect to s2" {
run retry_default must_fail_tcp_connection localhost:5000
echo "OUTPUT $output"
[ "$status" == "0" ]
}

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

View File

@ -0,0 +1,25 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should be able to connect to s2" {
run retry_default curl -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}

View File

@ -0,0 +1,11 @@
#!/bin/bash
set -euo pipefail
# Bring up s1 and its proxy as well because the check that it has a cert causes
# a proxy connection to be opened, and having the backend unavailable seems
# to cause Envoy to fail non-deterministically in CI (rarely on a local machine).
# It might be related to this known issue
# https://github.com/envoyproxy/envoy/issues/2800 where TcpProxy will sometimes
# error if the backend is down part way through the handshake.
export REQUIRED_SERVICES="s1 s1-sidecar-proxy-consul-exec"

View File

@ -0,0 +1,14 @@
#!/usr/bin/env bats
load helpers
# This test case really only validates the exec mechanism worked and brought
# envoy up.
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}

View File

@ -0,0 +1,25 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
envoy_dogstatsd_url = "udp://127.0.0.1:8125"
envoy_stats_tags = ["foo=bar"]
envoy_stats_flush_interval = "1s"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy fake-statsd"

View File

@ -0,0 +1,63 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 upstream should be able to connect to s2" {
run retry_default curl -s -f -d hello localhost:5000
echo "OUTPUT: $output"
[ "$status" == 0 ]
[ "$output" == "hello" ]
}
@test "s1 proxy should be sending metrics to statsd" {
run retry_default cat /workdir/statsd/statsd.log
echo "METRICS:"
echo "$output"
echo "COUNT: $(echo "$output" | grep -Ec '^envoy\.')"
[ "$status" == 0 ]
[ $(echo "$output" | grep -Ec '^envoy\.') -gt "0" ]
}
@test "s1 proxy should be sending dogstatsd tagged metrics" {
run retry_default must_match_in_statsd_logs '[#,]local_cluster:s1(,|$)'
echo "OUTPUT: $output"
[ "$status" == 0 ]
}
@test "s1 proxy should be adding cluster name as a tag" {
run retry_default must_match_in_statsd_logs '[#,]envoy.cluster_name:s2(,|$)'
echo "OUTPUT: $output"
[ "$status" == 0 ]
}
@test "s1 proxy should be sending additional configured tags" {
run retry_default must_match_in_statsd_logs '[#,]foo:bar(,|$)'
echo "OUTPUT: $output"
[ "$status" == 0 ]
}
@test "s1 proxy should have custom stats flush interval" {
INTERVAL=$(get_envoy_stats_flush_interval localhost:19000)
echo "INTERVAL = $INTERVAL"
[ "$INTERVAL" == "1s" ]
}

View File

@ -0,0 +1,25 @@
services {
name = "s1"
port = 8079
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "grpc"
}
}
]
config {
protocol = "grpc"
envoy_dogstatsd_url = "udp://127.0.0.1:8125"
envoy_stats_tags = ["foo=bar"]
envoy_stats_flush_interval = "1s"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
services {
name = "s2"
# Advertise gRPC port
port = 8179
connect {
sidecar_service {
proxy {
config {
protocol = "grpc"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy fake-statsd"

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 upstream should be able to connect to s2 via grpc" {
run fortio grpcping localhost:5000
echo "OUTPUT: $output"
[ "$status" == 0 ]
}
@test "s1 proxy should be sending gRPC metrics to statsd" {
run retry_default must_match_in_statsd_logs 'envoy.cluster.grpc.PingServer.total'
echo "OUTPUT: $output"
[ "$status" == 0 ]
}

View File

@ -0,0 +1,22 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
}
}
}
}
}

View File

@ -0,0 +1,13 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
}
}
}
}

View File

@ -0,0 +1,17 @@
#!/bin/bash
set -euo pipefail
# Setup deny intention
docker_consul intention create -deny s1 s2
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

View File

@ -0,0 +1,6 @@
#!/bin/bash
set -euo pipefail
# Remove deny intention
docker_consul intention delete s1 s2

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should NOT be able to connect to s2" {
run retry_default must_fail_http_connection localhost:5000
echo "OUTPUT $output"
[ "$status" == "0" ]
}

View File

@ -0,0 +1,22 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
}
}
}
}
}

View File

@ -0,0 +1,13 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

View File

@ -0,0 +1,39 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should be able to connect to s2 with http/1.1" {
run retry_default curl --http1.1 -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}
@test "s1 proxy should have been configured with http connection managers" {
LISTEN_FILTERS=$(get_envoy_listener_filters localhost:19000)
PUB=$(echo "$LISTEN_FILTERS" | grep -E "^public_listener:" | cut -f 2 -d ' ' )
UPS=$(echo "$LISTEN_FILTERS" | grep -E "^s2:" | cut -f 2 -d ' ' )
echo "LISTEN_FILTERS = $LISTEN_FILTERS"
echo "PUB = $PUB"
echo "UPS = $UPS"
[ "$PUB" = "envoy.ext_authz,envoy.http_connection_manager" ]
[ "$UPS" = "envoy.http_connection_manager" ]
}

View File

@ -0,0 +1,22 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http2"
}
}
]
config {
protocol = "http2"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
services {
name = "s2"
# Advertise gRPC (http2) port
port = 8179
connect {
sidecar_service {
proxy {
config {
protocol = "http2"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

View File

@ -0,0 +1,31 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should be able to connect to s2 via http2" {
# We use grpc here because it's the easiest way to test http2. The server
# needs to support h2c since the proxy doesn't talk TLS to the local app.
# Most http2 servers don't support that but gRPC does. We could use curl
run curl -f -s -X POST localhost:5000/PingServer.Ping/
echo "OUTPUT: $output"
[ "$status" == 0 ]
}

View File

@ -0,0 +1,23 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
envoy_prometheus_bind_addr = "0.0.0.0:1234"
}
}
}
}
}

View File

@ -0,0 +1,13 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
}
}
}
}

View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should be able to connect to s2 with http/1.1" {
run retry_default curl --http1.1 -s -f -d hello localhost:5000
[ "$status" -eq 0 ]
[ "$output" = "hello" ]
}
@test "s1 proxy should be exposing metrics to prometheus" {
# Should have http metrics. This is just a sample one. Require the metric to
# be present not just found in a comment (anchor the regexp).
run retry_defaults \
must_match_in_prometheus_response localhost:1234 \
'^envoy_http_downstream_rq_active'
# Should be labelling with local_cluster.
run retry_defaults \
must_match_in_prometheus_response localhost:1234 \
'[\{,]local_cluster="s1"[,}] '
# Should be labelling with http listener prefix.
run retry_defaults \
must_match_in_prometheus_response localhost:1234 \
'[\{,]envoy_http_conn_manager_prefix="public_listener_http"[,}]'
}
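
While this case is running, the same endpoint can be inspected by hand; the port comes from the envoy_prometheus_bind_addr of 0.0.0.0:1234 in the s1 config above. A minimal sketch:

curl -s localhost:1234/metrics | grep -E '^envoy_http_downstream_rq_active'
curl -s localhost:1234/metrics | grep -c 'local_cluster="s1"'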


@ -0,0 +1,23 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
envoy_statsd_url = "udp://127.0.0.1:8125"
}
}
}
}
}


@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy fake-statsd"


@ -0,0 +1,25 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 upstream should be able to connect to s2" {
run retry_default curl -s -f -d hello localhost:5000
[ "$status" == 0 ]
[ "$output" == "hello" ]
}
@test "s1 proxy should be sending metrics to statsd" {
run retry_default must_match_in_statsd_logs '^envoy\.'
echo "OUTPUT: $output"
[ "$status" == 0 ]
}
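
The captured statsd lines live in the shared workdir volume, so they can be copied to the host with the dump-volumes helper defined in docker-compose.yml and inspected directly. A minimal sketch:

docker-compose up dump-volumes
grep -E '^envoy\.' workdir/statsd/statsd.log | head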


@ -0,0 +1,60 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
config {
protocol = "http"
}
}
]
config {
protocol = "http"
envoy_tracing_json = <<EOF
{
"http": {
"name": "envoy.zipkin",
"config": {
"collector_cluster": "zipkin",
"collector_endpoint": "/api/v1/spans",
"shared_span_context": false
}
}
}
EOF
envoy_extra_static_clusters_json = <<EOF2
{
"name": "zipkin",
"type": "STRICT_DNS",
"connect_timeout": "5s",
"load_assignment": {
"cluster_name": "zipkin",
"endpoints": [
{
"lb_endpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9411
}
}
}
}
]
}
]
}
}
EOF2
}
}
}
}
}


@ -0,0 +1,51 @@
services {
name = "s2"
port = 8181
connect {
sidecar_service {
proxy {
config {
protocol = "http"
envoy_tracing_json = <<EOF
{
"http": {
"name": "envoy.zipkin",
"config": {
"collector_cluster": "zipkin",
"collector_endpoint": "/api/v1/spans",
"shared_span_context": false
}
}
}
EOF
envoy_extra_static_clusters_json = <<EOF2
{
"name": "zipkin",
"type": "STRICT_DNS",
"connect_timeout": "5s",
"load_assignment": {
"cluster_name": "zipkin",
"endpoints": [
{
"lb_endpoints": [
{
"endpoint": {
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9411
}
}
}
}
]
}
]
}
}
EOF2
}
}
}
}
}


@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
docker_consul connect envoy -bootstrap \
-proxy-id s1-sidecar-proxy \
> workdir/envoy/s1-bootstrap.json
docker_consul connect envoy -bootstrap \
-proxy-id s2-sidecar-proxy \
-admin-bind 127.0.0.1:19001 \
> workdir/envoy/s2-bootstrap.json
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy jaeger"


@ -0,0 +1,44 @@
#!/usr/bin/env bats
load helpers
@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}
@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}
@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}
@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}
@test "s1 upstream should be able to connect to s2" {
run retry_default curl -s -f -d hello localhost:5000
[ "$status" == "0" ]
[ "$output" == "hello" ]
}
@test "s1 proxy should send trace spans to zipkin/jaeger" {
# Send a traced request through the upstream. The Debug endpoint echoes the
# request headers back, which we can use to read the trace ID that was
# generated (no way to force a specific one that I can find with Envoy
# currently?)
run curl -s -f -H 'x-client-trace-id:test-sentinel' localhost:5000/Debug
echo "OUTPUT $output"
[ "$status" == "0" ]
# Get the traceID from the output
TRACEID=$(echo "$output" | grep 'X-B3-Traceid:' | cut -c 15-)
# Get the trace from Jaeger. We won't bother parsing it; just seeing it show
# up there is enough to know that the tracing config worked.
run retry_default curl -s -f "localhost:16686/api/traces/$TRACEID"
}
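
Traces can also be pulled from Jaeger's query API by service name rather than by trace ID, which is handy when the echoed headers are hard to read. A minimal sketch, assuming Envoy reports its spans under the service name "s1" and the Jaeger UI port 16686 exposed in docker-compose.yml:

curl -s 'localhost:16686/api/traces?service=s1&limit=1' | jq '.data | length'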


@ -0,0 +1,16 @@
services {
name = "s1"
port = 8080
connect {
sidecar_service {
proxy {
upstreams = [
{
destination_name = "s2"
local_bind_port = 5000
}
]
}
}
}
}


@ -0,0 +1,5 @@
services {
name = "s2"
port = 8181
connect { sidecar_service {} }
}


@ -0,0 +1,193 @@
version: '3.4'
x-workdir:
&workdir-volume
type: volume
source: workdir
target: /workdir
volume:
nocopy: true
volumes:
workdir:
services:
# This is a dummy container that we use to create the volume and keep it
# accessible while other containers are down.
workdir:
image: alpine
volumes:
- *workdir-volume
command:
- sleep
- "86400"
consul:
image: "consul-dev"
command:
- "agent"
- "-dev"
- "-config-dir"
- "/workdir/consul"
- "-client"
- "0.0.0.0"
volumes:
- *workdir-volume
ports:
# Exposing to host makes debugging locally a bit easier
- "8500:8500"
- "8502:8502"
# For zipkin, which uses this container's network
- 9411:9411
# Jaeger UI
- 16686:16686
s1:
depends_on:
- consul
image: "fortio/fortio"
command:
- "server"
- "-http-port"
- ":8080"
- "-grpc-port"
- ":8079"
network_mode: service:consul
s2:
depends_on:
- consul
image: "fortio/fortio"
command:
- "server"
- "-http-port"
- ":8181"
- "-grpc-port"
- ":8179"
network_mode: service:consul
s1-sidecar-proxy:
depends_on:
- consul
image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
command:
- "envoy"
- "-c"
- "/workdir/envoy/s1-bootstrap.json"
- "-l"
- "debug"
# Hot restart breaks since both envoys seem to interact with each other
# despite separate containers that don't share IPC namespace. Not quite
# sure how this happens but may be due to unix socket being in some shared
# location?
- "--disable-hot-restart"
- "--drain-time-s"
- "1"
volumes:
- *workdir-volume
network_mode: service:consul
s2-sidecar-proxy:
depends_on:
- consul
image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
command:
- "envoy"
- "-c"
- "/workdir/envoy/s2-bootstrap.json"
- "-l"
- "debug"
# Hot restart breaks since both envoys seem to interact with each other
# despite separate containers that don't share IPC namespace. Not quite
# sure how this happens but may be due to unix socket being in some shared
# location?
- "--disable-hot-restart"
- "--drain-time-s"
- "1"
volumes:
- *workdir-volume
network_mode: service:consul
verify:
depends_on:
- consul
build:
context: .
dockerfile: Dockerfile-bats
tty: true
command:
- "--pretty"
- "/workdir/bats"
volumes:
- *workdir-volume
network_mode: service:consul
s1-sidecar-proxy-consul-exec:
depends_on:
- consul
build:
context: .
dockerfile: Dockerfile-consul-envoy
args:
ENVOY_VERSION: ${ENVOY_VERSION:-1.8.0}
command:
- "consul"
- "connect"
- "envoy"
- "-sidecar-for"
- "s1"
- "--"
- "-l"
- "debug"
network_mode: service:consul
fake-statsd:
depends_on:
- consul
image: "alpine/socat"
command:
- -u
- UDP-RECVFROM:8125,fork,reuseaddr
# This magic incantation is needed since Envoy doesn't add newlines and so
# we need each packet to be passed to echo to add a new line before
# appending.
- SYSTEM:'xargs -0 echo >> /workdir/statsd/statsd.log'
volumes:
- *workdir-volume
network_mode: service:consul
wipe-volumes:
volumes:
- *workdir-volume
image: alpine
command:
- sh
- -c
- 'rm -rf /workdir/*'
# This is a debugging tool. Run "docker-compose up dump-volumes" to see the
# current state.
dump-volumes:
volumes:
- *workdir-volume
- ./:/cwd
image: alpine
command:
- cp
- -r
- /workdir/.
- /cwd/workdir/
zipkin:
volumes:
- *workdir-volume
image: openzipkin/zipkin
network_mode: service:consul
jaeger:
volumes:
- *workdir-volume
image: jaegertracing/all-in-one:1.11
network_mode: service:consul
command:
- --collector.zipkin.http-port=9411
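
For iterating on a single case without the full runner, the pieces can be brought up directly with docker-compose. A minimal sketch mirroring what run-tests.sh does, assuming the workdir volume has already been populated for the case (configs, bootstrap files and bats tests copied in):

docker-compose up -d workdir consul
docker-compose up -d s1 s1-sidecar-proxy s2 s2-sidecar-proxy
docker-compose up --build --abort-on-container-exit --exit-code-from verify verify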


@ -0,0 +1,132 @@
#!/bin/bash
# retry based on
# https://github.com/fernandoacorreia/azure-docker-registry/blob/master/tools/scripts/create-registry-server
# under MIT license.
function retry {
local n=1
local max=$1
shift
local delay=$1
shift
while true; do
"$@" && break || {
exit=$?
if [[ $n -lt $max ]]; then
((n++))
echo "Command failed. Attempt $n/$max:"
sleep $delay;
else
echo "The command has failed after $n attempts." >&2
return $exit
fi
}
done
}
function retry_default {
retry 5 1 "$@"
}
function echored {
tput setaf 1
tput bold
echo $@
tput sgr0
}
function echogreen {
tput setaf 2
tput bold
echo $@
tput sgr0
}
function get_cert {
local HOSTPORT=$1
openssl s_client -connect $HOSTPORT \
-showcerts 2>/dev/null \
| openssl x509 -noout -text
}
function assert_proxy_presents_cert_uri {
local HOSTPORT=$1
local SERVICENAME=$2
CERT=$(retry_default get_cert $HOSTPORT)
echo "WANT SERVICE: $SERVICENAME"
echo "GOT CERT:"
echo "$CERT"
echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/default/dc/dc1/svc/$SERVICENAME"
}
function get_envoy_listener_filters {
local HOSTPORT=$1
run retry_default curl -s -f $HOSTPORT/config_dump
[ "$status" -eq 0 ]
echo "$output" | jq --raw-output '.configs[2].dynamic_active_listeners[].listener | "\(.name) \( .filter_chains[0].filters | map(.name) | join(","))"'
}
function get_envoy_stats_flush_interval {
local HOSTPORT=$1
run retry_default curl -s -f $HOSTPORT/config_dump
[ "$status" -eq 0 ]
#echo "$output" > /workdir/s1_envoy_dump.json
echo "$output" | jq --raw-output '.configs[0].bootstrap.stats_flush_interval'
}
function docker_consul {
docker run -ti --network container:envoy_consul_1 consul-dev "$@"
}
function must_match_in_statsd_logs {
run cat /workdir/statsd/statsd.log
COUNT=$( echo "$output" | grep -Ec $1 )
echo "COUNT of '$1' matches: $COUNT"
[ "$status" == 0 ]
[ "$COUNT" -gt "0" ]
}
function must_match_in_prometheus_response {
run curl -f -s $1/metrics
COUNT=$( echo "$output" | grep -Ec $2 )
echo "COUNT of '$2' matches: $COUNT"
[ "$status" == 0 ]
[ "$COUNT" -gt "0" ]
}
# must_fail_tcp_connection checks that a request made through an upstream fails,
# probably due to authz being denied if all other tests passed already. Although
# we are using curl, this only works as expected for TCP upstreams as we are
# checking TCP-level errors. HTTP upstreams will return a valid 503 generated by
# Envoy rather than a connection-level error.
function must_fail_tcp_connection {
# Attempt to curl through upstream
run curl -s -v -f -d hello $1
echo "OUTPUT $output"
# Should fail during handshake and return "got nothing" error
[ "$status" == "52" ]
# Verbose output should include the empty reply
echo "$output" | grep 'Empty reply from server'
}
# must_fail_http_connection see must_fail_tcp_connection but this expects Envoy
# to generate a 503 response since the upstreams have refused connection.
function must_fail_http_connection {
# Attempt to curl through upstream
run curl -s -i -d hello $1
echo "OUTPUT $output"
# Should fail request with 503
echo "$output" | grep '503 Service Unavailable'
}
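
The must_fail_* helpers above are meant to be called from a case's verify.bats once Connect intentions deny the traffic. A hypothetical sketch of such a test, assuming an HTTP upstream bound to localhost:5000:

@test "s1 upstream should NOT be able to connect to s2 (sketch)" {
run must_fail_http_connection localhost:5000
echo "OUTPUT $output"
[ "$status" -eq 0 ]
}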


@ -0,0 +1,158 @@
#!/bin/bash
set -euo pipefail
# DEBUG=1 enables set -x for this script so echos every command run
DEBUG=${DEBUG:-}
# FILTER_TESTS="<pattern>" skips any test whose CASENAME doesn't match the
# pattern. CASENAME is a combination of the name from the case-<name> dir and
# the Envoy version, for example "http, envoy 1.8.0". The pattern is passed to
# grep over that string.
FILTER_TESTS=${FILTER_TESTS:-}
# LEAVE_CONSUL_UP=1 leaves the consul container running at the end which can be
# useful for debugging.
LEAVE_CONSUL_UP=${LEAVE_CONSUL_UP:-}
# QUIESCE_SECS=1 will cause the runner to sleep for 1 second after setup but
# before the verify container is run. This is useful for CI, which seems to
# pass more reliably with the pause even though docker-compose up waits for
# containers to start and our tests retry.
QUIESCE_SECS=${QUIESCE_SECS:-}
# ENVOY_VERSIONS is the list of envoy versions to run each test against
ENVOY_VERSIONS=${ENVOY_VERSIONS:-"1.8.0 1.9.1"}
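# Example invocations (illustrative only):
#   FILTER_TESTS=http ./run-tests.sh            # only cases whose CASENAME matches "http"
#   ENVOY_VERSIONS=1.9.1 DEBUG=1 ./run-tests.sh # single Envoy version, with command echo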
if [ ! -z "$DEBUG" ] ; then
set -x
fi
DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
cd $DIR
FILTER_TESTS=${FILTER_TESTS:-}
LEAVE_CONSUL_UP=${LEAVE_CONSUL_UP:-}
PROXY_LOGS_ON_FAIL=${PROXY_LOGS_ON_FAIL:-}
mkdir -p workdir/{consul,envoy,bats,statsd,logs}
source helpers.bash
RESULT=1
CLEANED_UP=0
function cleanup {
if [ "$CLEANED_UP" != 0 ] ; then
return
fi
CLEANED_UP=1
if [ -z "$LEAVE_CONSUL_UP" ] ; then
docker-compose down
fi
}
trap cleanup EXIT
# Start the volume container
docker-compose up -d workdir
for c in ./case-*/ ; do
for ev in $ENVOY_VERSIONS ; do
CASENAME="$( basename $c | cut -c6- ), envoy $ev"
echo ""
echo "==> CASE $CASENAME"
export ENVOY_VERSION=$ev
if [ ! -z "$FILTER_TESTS" ] && echo "$CASENAME" | grep -v "$FILTER_TESTS" > /dev/null ; then
echo " SKIPPED: doesn't match FILTER_TESTS=$FILTER_TESTS"
continue 1
fi
# Wipe state
docker-compose up wipe-volumes
# Reload consul config from defaults
cp consul-base-cfg/* workdir/consul
# Add any overrides if there are any (no op if not)
cp -f ${c}*.hcl workdir/consul 2>/dev/null || :
# Push the state to the shared docker volume (note this is because CircleCI
# can't use shared volumes)
docker cp workdir/. envoy_workdir_1:/workdir
# Start Consul first. We do this here even though typically nothing stopped
# it, because it sometimes seems to be killed by something else (the OOM
# killer?).
docker-compose up -d consul
# Reload consul
echo "Reloading Consul config"
if ! retry 10 2 docker_consul reload ; then
# Clean up everything before we abort
#docker-compose down
echored " FAIL - couldn't reload consul config"
exit 1
fi
# Copy all the test files
cp ${c}*.bats workdir/bats
cp helpers.bash workdir/bats
# Run test case setup (e.g. generating Envoy bootstrap, starting containers)
source ${c}setup.sh
# Push the state to the shared docker volume (note this is because CircleCI
# can't use shared volumes)
docker cp workdir/. envoy_workdir_1:/workdir
# Start containers required
if [ ! -z "$REQUIRED_SERVICES" ] ; then
docker-compose up -d $REQUIRED_SERVICES
fi
if [ ! -z "$QUIESCE_SECS" ] ; then
echo "Sleeping for $QUIESCE_SECS seconds"
sleep $QUIESCE_SECS
fi
# Execute tests
THISRESULT=1
if docker-compose up --build --abort-on-container-exit --exit-code-from verify verify ; then
echo -n "==> CASE $CASENAME: "
echogreen "✓ PASS"
else
echo -n "==> CASE $CASENAME: "
echored " FAIL"
if [ $RESULT -eq 1 ] ; then
RESULT=0
fi
THISRESULT=0
fi
# Teardown
if [ -f "${c}teardown.sh" ] ; then
source "${c}teardown.sh"
fi
if [ ! -z "$REQUIRED_SERVICES" ] ; then
if [[ "$THISRESULT" == 0 ]] ; then
mkdir -p workdir/logs/$c/$ENVOY_VERSION
for cont in $REQUIRED_SERVICES; do
docker-compose logs --no-color $cont > workdir/logs/$c/$ENVOY_VERSION/$cont.log 2>&1
done
fi
docker-compose stop $REQUIRED_SERVICES
fi
done
done
cleanup
if [ $RESULT -eq 1 ] ; then
echogreen "✓ PASS"
else
echored " FAIL"
exit 1
fi
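
When a case fails, its container logs end up under workdir/logs/<case dir>/<envoy version>/ (see the teardown section of the loop above). A quick, illustrative way to scan them:

ls workdir/logs/
grep -ril error workdir/logs/ | head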


@ -1,4 +1,6 @@
wmi
===
Package wmi provides a WQL interface for WMI on Windows.
Package wmi provides a WQL interface to Windows WMI.
Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.

vendor/github.com/StackExchange/wmi/swbemservices.go

@ -0,0 +1,260 @@
// +build windows
package wmi
import (
"fmt"
"reflect"
"runtime"
"sync"
"github.com/go-ole/go-ole"
"github.com/go-ole/go-ole/oleutil"
)
// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx
type SWbemServices struct {
//TODO: track namespace. Not sure if we can re connect to a different namespace using the same instance
cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method
sWbemLocatorIUnknown *ole.IUnknown
sWbemLocatorIDispatch *ole.IDispatch
queries chan *queryRequest
closeError chan error
lQueryorClose sync.Mutex
}
type queryRequest struct {
query string
dst interface{}
args []interface{}
finished chan error
}
// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI
func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) {
//fmt.Println("InitializeSWbemServices: Starting")
//TODO: implement connectServerArgs as optional argument for init with connectServer call
s := new(SWbemServices)
s.cWMIClient = c
s.queries = make(chan *queryRequest)
initError := make(chan error)
go s.process(initError)
err, ok := <-initError
if ok {
return nil, err //Send error to caller
}
//fmt.Println("InitializeSWbemServices: Finished")
return s, nil
}
// Close will clear and release all of the SWbemServices resources
func (s *SWbemServices) Close() error {
s.lQueryorClose.Lock()
if s == nil || s.sWbemLocatorIDispatch == nil {
s.lQueryorClose.Unlock()
return fmt.Errorf("SWbemServices is not Initialized")
}
if s.queries == nil {
s.lQueryorClose.Unlock()
return fmt.Errorf("SWbemServices has been closed")
}
//fmt.Println("Close: sending close request")
var result error
ce := make(chan error)
s.closeError = ce //Race condition if multiple callers to close. May need to lock here
close(s.queries) //Tell background to shut things down
s.lQueryorClose.Unlock()
err, ok := <-ce
if ok {
result = err
}
//fmt.Println("Close: finished")
return result
}
func (s *SWbemServices) process(initError chan error) {
//fmt.Println("process: starting background thread initialization")
//All OLE/WMI calls must happen on the same initialized thread, so lock this goroutine
runtime.LockOSThread()
defer runtime.UnlockOSThread()
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
if err != nil {
oleCode := err.(*ole.OleError).Code()
if oleCode != ole.S_OK && oleCode != S_FALSE {
initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err)
return
}
}
defer ole.CoUninitialize()
unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
if err != nil {
initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err)
return
} else if unknown == nil {
initError <- ErrNilCreateObject
return
}
defer unknown.Release()
s.sWbemLocatorIUnknown = unknown
dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch)
if err != nil {
initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err)
return
}
defer dispatch.Release()
s.sWbemLocatorIDispatch = dispatch
// we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs
//fmt.Println("process: initialized. closing initError")
close(initError)
//fmt.Println("process: waiting for queries")
for q := range s.queries {
//fmt.Printf("process: new query: len(query)=%d\n", len(q.query))
errQuery := s.queryBackground(q)
//fmt.Println("process: s.queryBackground finished")
if errQuery != nil {
q.finished <- errQuery
}
close(q.finished)
}
//fmt.Println("process: queries channel closed")
s.queries = nil //set channel to nil so we know it is closed
//TODO: I think the Release/Clear calls can panic if things are in a bad state.
//TODO: May need to recover from panics and send error to method caller instead.
close(s.closeError)
}
// Query runs the WQL query using a SWbemServices instance and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
// the query must have the same name in dst. Supported types are all signed and
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
// Array types are not supported.
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
s.lQueryorClose.Lock()
if s == nil || s.sWbemLocatorIDispatch == nil {
s.lQueryorClose.Unlock()
return fmt.Errorf("SWbemServices is not Initialized")
}
if s.queries == nil {
s.lQueryorClose.Unlock()
return fmt.Errorf("SWbemServices has been closed")
}
//fmt.Println("Query: Sending query request")
qr := queryRequest{
query: query,
dst: dst,
args: connectServerArgs,
finished: make(chan error),
}
s.queries <- &qr
s.lQueryorClose.Unlock()
err, ok := <-qr.finished
if ok {
//fmt.Println("Query: Finished with error")
return err //Send error to caller
}
//fmt.Println("Query: Finished")
return nil
}
func (s *SWbemServices) queryBackground(q *queryRequest) error {
if s == nil || s.sWbemLocatorIDispatch == nil {
return fmt.Errorf("SWbemServices is not Initialized")
}
wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart
//fmt.Println("queryBackground: Starting")
dv := reflect.ValueOf(q.dst)
if dv.Kind() != reflect.Ptr || dv.IsNil() {
return ErrInvalidEntityType
}
dv = dv.Elem()
mat, elemType := checkMultiArg(dv)
if mat == multiArgTypeInvalid {
return ErrInvalidEntityType
}
// service is a SWbemServices
serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...)
if err != nil {
return err
}
service := serviceRaw.ToIDispatch()
defer serviceRaw.Clear()
// result is a SWBemObjectSet
resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query)
if err != nil {
return err
}
result := resultRaw.ToIDispatch()
defer resultRaw.Clear()
count, err := oleInt64(result, "Count")
if err != nil {
return err
}
enumProperty, err := result.GetProperty("_NewEnum")
if err != nil {
return err
}
defer enumProperty.Clear()
enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
if err != nil {
return err
}
if enum == nil {
return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
}
defer enum.Release()
// Initialize a slice with Count capacity
dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
var errFieldMismatch error
for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
if err != nil {
return err
}
err := func() error {
// item is a SWbemObject, but really a Win32_Process
item := itemRaw.ToIDispatch()
defer item.Release()
ev := reflect.New(elemType)
if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil {
if _, ok := err.(*ErrFieldMismatch); ok {
// We continue loading entities even in the face of field mismatch errors.
// If we encounter any other error, that other error is returned. Otherwise,
// an ErrFieldMismatch is returned.
errFieldMismatch = err
} else {
return err
}
}
if mat != multiArgTypeStructPtr {
ev = ev.Elem()
}
dv.Set(reflect.Append(dv, ev))
return nil
}()
if err != nil {
return err
}
}
//fmt.Println("queryBackground: Finished")
return errFieldMismatch
}


@ -72,7 +72,10 @@ func QueryNamespace(query string, dst interface{}, namespace string) error {
//
// Query is a wrapper around DefaultClient.Query.
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
return DefaultClient.Query(query, dst, connectServerArgs...)
if DefaultClient.SWbemServicesClient == nil {
return DefaultClient.Query(query, dst, connectServerArgs...)
}
return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...)
}
// A Client is an WMI query client.
@ -99,6 +102,11 @@ type Client struct {
// Setting this to true allows custom queries to be used with full
// struct definitions instead of having to define multiple structs.
AllowMissingFields bool
// SWbemServiceClient is an optional SWbemServices object that can be
// initialized and then reused across multiple queries. If it is null
// then the method will initialize a new temporary client each time.
SWbemServicesClient *SWbemServices
}
// DefaultClient is the default Client and is used by Query, QueryNamespace
@ -362,17 +370,50 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat
}
}
default:
typeof := reflect.TypeOf(val)
if typeof == nil && (isPtr || c.NonePtrZero) {
if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
of.Set(reflect.Zero(of.Type()))
if f.Kind() == reflect.Slice {
switch f.Type().Elem().Kind() {
case reflect.String:
safeArray := prop.ToArray()
if safeArray != nil {
arr := safeArray.ToValueArray()
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
for i, v := range arr {
s := fArr.Index(i)
s.SetString(v.(string))
}
f.Set(fArr)
}
case reflect.Uint8:
safeArray := prop.ToArray()
if safeArray != nil {
arr := safeArray.ToValueArray()
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
for i, v := range arr {
s := fArr.Index(i)
s.SetUint(reflect.ValueOf(v).Uint())
}
f.Set(fArr)
}
default:
return &ErrFieldMismatch{
StructType: of.Type(),
FieldName: n,
Reason: fmt.Sprintf("unsupported slice type (%T)", val),
}
}
} else {
typeof := reflect.TypeOf(val)
if typeof == nil && (isPtr || c.NonePtrZero) {
if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
of.Set(reflect.Zero(of.Type()))
}
break
}
return &ErrFieldMismatch{
StructType: of.Type(),
FieldName: n,
Reason: fmt.Sprintf("unsupported type (%T)", val),
}
break
}
return &ErrFieldMismatch{
StructType: of.Type(),
FieldName: n,
Reason: fmt.Sprintf("unsupported type (%T)", val),
}
}
}


@ -0,0 +1,660 @@
// Code generated by protoc-gen-validate
// source: envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
// DO NOT EDIT!!!
package v2
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/gogo/protobuf/types"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = types.DynamicAny{}
)
// Validate checks the field values on HttpConnectionManager with the rules
// defined in the proto definition for this message. If any rules are
// violated, an error is returned.
func (m *HttpConnectionManager) Validate() error {
if m == nil {
return nil
}
if _, ok := HttpConnectionManager_CodecType_name[int32(m.GetCodecType())]; !ok {
return HttpConnectionManagerValidationError{
Field: "CodecType",
Reason: "value must be one of the defined enum values",
}
}
if len(m.GetStatPrefix()) < 1 {
return HttpConnectionManagerValidationError{
Field: "StatPrefix",
Reason: "value length must be at least 1 bytes",
}
}
for idx, item := range m.GetHttpFilters() {
_, _ = idx, item
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: fmt.Sprintf("HttpFilters[%v]", idx),
Reason: "embedded message failed validation",
Cause: err,
}
}
}
}
if v, ok := interface{}(m.GetAddUserAgent()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "AddUserAgent",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "Tracing",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetHttpProtocolOptions()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "HttpProtocolOptions",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "Http2ProtocolOptions",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
// no validation rules for ServerName
if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "IdleTimeout",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetStreamIdleTimeout()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "StreamIdleTimeout",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetDrainTimeout()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "DrainTimeout",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
for idx, item := range m.GetAccessLog() {
_, _ = idx, item
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: fmt.Sprintf("AccessLog[%v]", idx),
Reason: "embedded message failed validation",
Cause: err,
}
}
}
}
if v, ok := interface{}(m.GetUseRemoteAddress()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "UseRemoteAddress",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
// no validation rules for XffNumTrustedHops
// no validation rules for SkipXffAppend
// no validation rules for Via
if v, ok := interface{}(m.GetGenerateRequestId()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "GenerateRequestId",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if _, ok := HttpConnectionManager_ForwardClientCertDetails_name[int32(m.GetForwardClientCertDetails())]; !ok {
return HttpConnectionManagerValidationError{
Field: "ForwardClientCertDetails",
Reason: "value must be one of the defined enum values",
}
}
if v, ok := interface{}(m.GetSetCurrentClientCertDetails()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "SetCurrentClientCertDetails",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
// no validation rules for Proxy_100Continue
// no validation rules for RepresentIpv4RemoteAddressAsIpv4MappedIpv6
for idx, item := range m.GetUpgradeConfigs() {
_, _ = idx, item
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: fmt.Sprintf("UpgradeConfigs[%v]", idx),
Reason: "embedded message failed validation",
Cause: err,
}
}
}
}
switch m.RouteSpecifier.(type) {
case *HttpConnectionManager_Rds:
if v, ok := interface{}(m.GetRds()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "Rds",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
case *HttpConnectionManager_RouteConfig:
if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManagerValidationError{
Field: "RouteConfig",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
default:
return HttpConnectionManagerValidationError{
Field: "RouteSpecifier",
Reason: "value is required",
}
}
return nil
}
// HttpConnectionManagerValidationError is the validation error returned by
// HttpConnectionManager.Validate if the designated constraints aren't met.
type HttpConnectionManagerValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e HttpConnectionManagerValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpConnectionManager.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = HttpConnectionManagerValidationError{}
// Validate checks the field values on Rds with the rules defined in the proto
// definition for this message. If any rules are violated, an error is returned.
func (m *Rds) Validate() error {
if m == nil {
return nil
}
if v, ok := interface{}(m.GetConfigSource()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return RdsValidationError{
Field: "ConfigSource",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if len(m.GetRouteConfigName()) < 1 {
return RdsValidationError{
Field: "RouteConfigName",
Reason: "value length must be at least 1 bytes",
}
}
return nil
}
// RdsValidationError is the validation error returned by Rds.Validate if the
// designated constraints aren't met.
type RdsValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e RdsValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sRds.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = RdsValidationError{}
// Validate checks the field values on HttpFilter with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *HttpFilter) Validate() error {
if m == nil {
return nil
}
if len(m.GetName()) < 1 {
return HttpFilterValidationError{
Field: "Name",
Reason: "value length must be at least 1 bytes",
}
}
if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpFilterValidationError{
Field: "Config",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetDeprecatedV1()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpFilterValidationError{
Field: "DeprecatedV1",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
return nil
}
// HttpFilterValidationError is the validation error returned by
// HttpFilter.Validate if the designated constraints aren't met.
type HttpFilterValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e HttpFilterValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpFilter.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = HttpFilterValidationError{}
// Validate checks the field values on HttpConnectionManager_Tracing with the
// rules defined in the proto definition for this message. If any rules are
// violated, an error is returned.
func (m *HttpConnectionManager_Tracing) Validate() error {
if m == nil {
return nil
}
if _, ok := HttpConnectionManager_Tracing_OperationName_name[int32(m.GetOperationName())]; !ok {
return HttpConnectionManager_TracingValidationError{
Field: "OperationName",
Reason: "value must be one of the defined enum values",
}
}
if v, ok := interface{}(m.GetClientSampling()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManager_TracingValidationError{
Field: "ClientSampling",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetRandomSampling()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManager_TracingValidationError{
Field: "RandomSampling",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
if v, ok := interface{}(m.GetOverallSampling()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManager_TracingValidationError{
Field: "OverallSampling",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
return nil
}
// HttpConnectionManager_TracingValidationError is the validation error
// returned by HttpConnectionManager_Tracing.Validate if the designated
// constraints aren't met.
type HttpConnectionManager_TracingValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e HttpConnectionManager_TracingValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpConnectionManager_Tracing.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = HttpConnectionManager_TracingValidationError{}
// Validate checks the field values on
// HttpConnectionManager_SetCurrentClientCertDetails with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *HttpConnectionManager_SetCurrentClientCertDetails) Validate() error {
if m == nil {
return nil
}
if v, ok := interface{}(m.GetSubject()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManager_SetCurrentClientCertDetailsValidationError{
Field: "Subject",
Reason: "embedded message failed validation",
Cause: err,
}
}
}
// no validation rules for Cert
// no validation rules for Dns
// no validation rules for Uri
return nil
}
// HttpConnectionManager_SetCurrentClientCertDetailsValidationError is the
// validation error returned by
// HttpConnectionManager_SetCurrentClientCertDetails.Validate if the
// designated constraints aren't met.
type HttpConnectionManager_SetCurrentClientCertDetailsValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpConnectionManager_SetCurrentClientCertDetails.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = HttpConnectionManager_SetCurrentClientCertDetailsValidationError{}
// Validate checks the field values on HttpConnectionManager_UpgradeConfig with
// the rules defined in the proto definition for this message. If any rules
// are violated, an error is returned.
func (m *HttpConnectionManager_UpgradeConfig) Validate() error {
if m == nil {
return nil
}
// no validation rules for UpgradeType
for idx, item := range m.GetFilters() {
_, _ = idx, item
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return HttpConnectionManager_UpgradeConfigValidationError{
Field: fmt.Sprintf("Filters[%v]", idx),
Reason: "embedded message failed validation",
Cause: err,
}
}
}
}
return nil
}
// HttpConnectionManager_UpgradeConfigValidationError is the validation error
// returned by HttpConnectionManager_UpgradeConfig.Validate if the designated
// constraints aren't met.
type HttpConnectionManager_UpgradeConfigValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e HttpConnectionManager_UpgradeConfigValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpConnectionManager_UpgradeConfig.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = HttpConnectionManager_UpgradeConfigValidationError{}
// Validate checks the field values on HttpFilter_DeprecatedV1 with the rules
// defined in the proto definition for this message. If any rules are
// violated, an error is returned.
func (m *HttpFilter_DeprecatedV1) Validate() error {
if m == nil {
return nil
}
// no validation rules for Type
return nil
}
// HttpFilter_DeprecatedV1ValidationError is the validation error returned by
// HttpFilter_DeprecatedV1.Validate if the designated constraints aren't met.
type HttpFilter_DeprecatedV1ValidationError struct {
Field string
Reason string
Cause error
Key bool
}
// Error satisfies the builtin error interface
func (e HttpFilter_DeprecatedV1ValidationError) Error() string {
cause := ""
if e.Cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.Cause)
}
key := ""
if e.Key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpFilter_DeprecatedV1.%s: %s%s",
key,
e.Field,
e.Reason,
cause)
}
var _ error = HttpFilter_DeprecatedV1ValidationError{}

vendor/github.com/go-ole/go-ole/LICENSE

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright © 2013-2017 Yasuhiro Matsumoto, <mattn.jp@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the “Software”), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -23,7 +23,7 @@ environment:
GOPATH: c:\gopath
matrix:
- GOARCH: amd64
GOVERSION: 1.4
GOVERSION: 1.5
GOROOT: c:\go
DOWNLOADPLATFORM: "x64"
@ -37,15 +37,6 @@ install:
# - set
- go version
- go env
- c:\gopath\src\github.com\go-ole\go-ole\build\compile-go.bat
- go tool dist install -v cmd/8a
- go tool dist install -v cmd/8c
- go tool dist install -v cmd/8g
- go tool dist install -v cmd/8l
- go tool dist install -v cmd/6a
- go tool dist install -v cmd/6c
- go tool dist install -v cmd/6g
- go tool dist install -v cmd/6l
- go get -u golang.org/x/tools/cmd/cover
- go get -u golang.org/x/tools/cmd/godoc
- go get -u golang.org/x/tools/cmd/stringer


@ -321,9 +321,9 @@ func DispatchMessage(msg *Msg) (ret int32) {
// GetVariantDate converts COM Variant Time value to Go time.Time.
func GetVariantDate(value float64) (time.Time, error) {
var st syscall.Systemtime
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(unsafe.Pointer(&value)), uintptr(unsafe.Pointer(&st)))
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st)))
if r != 0 {
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), nil), nil
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
}
return time.Now(), errors.New("Could not convert to time, passing current time.")
}


@ -63,6 +63,10 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}
dispnames := [1]int32{DISPID_PROPERTYPUT}
dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
dispparams.cNamedArgs = 1
} else if dispatch&DISPATCH_PROPERTYPUTREF != 0 {
dispnames := [1]int32{DISPID_PROPERTYPUT}
dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
dispparams.cNamedArgs = 1
}
var vargs []VARIANT
if len(params) > 0 {


@ -26,6 +26,16 @@ type EXCEPINFO struct {
scode uint32
}
// WCode return wCode in EXCEPINFO.
func (e EXCEPINFO) WCode() uint16 {
return e.wCode
}
// SCODE return scode in EXCEPINFO.
func (e EXCEPINFO) SCODE() uint32 {
return e.scode
}
// String convert EXCEPINFO to string.
func (e EXCEPINFO) String() string {
var src, desc, hlp string


@ -88,6 +88,20 @@ func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (r
return r
}
// PutPropertyRef mutates property reference.
func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params)
}
// MustPutPropertyRef mutates property reference or panics.
func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
r, err := PutPropertyRef(disp, name, params...)
if err != nil {
panic(err.Error())
}
return r
}
func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error {
newEnum, err := disp.GetProperty("_NewEnum")
if err != nil {


@ -75,19 +75,19 @@ func Sum256(data []byte) [Size256]byte {
}
// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
// A non-nil key turns the hash into a MAC. The key must between zero and 64 bytes long.
// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long.
// The hash size can be a value between 1 and 64 but it is highly recommended to use
// values equal or greater than:
// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).


@ -50,6 +50,12 @@ func NewFixedBuilder(buffer []byte) *Builder {
}
}
// SetError sets the value to be returned as the error from Bytes. Writes
// performed after calling SetError are ignored.
func (b *Builder) SetError(err error) {
b.err = err
}
// Bytes returns the bytes written by the builder or an error if one has
// occurred during building.
func (b *Builder) Bytes() ([]byte, error) {
@ -94,7 +100,7 @@ func (b *Builder) AddBytes(v []byte) {
b.add(v...)
}
// BuilderContinuation is continuation-passing interface for building
// BuilderContinuation is a continuation-passing interface for building
// length-prefixed byte sequences. Builder methods for length-prefixed
// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
// supplied to them. The child builder passed to the continuation can be used
@ -268,9 +274,11 @@ func (b *Builder) flushChild() {
return
}
if !b.fixedSize {
b.result = child.result // In case child reallocated result.
if b.fixedSize && &b.result[0] != &child.result[0] {
panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
}
b.result = child.result
}
func (b *Builder) add(bytes ...byte) {
@ -278,7 +286,7 @@ func (b *Builder) add(bytes ...byte) {
return
}
if b.child != nil {
panic("attempted write while child is pending")
panic("cryptobyte: attempted write while child is pending")
}
if len(b.result)+len(bytes) < len(bytes) {
b.err = errors.New("cryptobyte: length overflow")
@ -290,6 +298,26 @@ func (b *Builder) add(bytes ...byte) {
b.result = append(b.result, bytes...)
}
// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
// child builder passed to a continuation to unwrite bytes from its parent will
// panic.
func (b *Builder) Unwrite(n int) {
if b.err != nil {
return
}
if b.child != nil {
panic("cryptobyte: attempted unwrite while child is pending")
}
length := len(b.result) - b.pendingLenLen - b.offset
if length < 0 {
panic("cryptobyte: internal error")
}
if n > length {
panic("cryptobyte: attempted to unwrite more than was written")
}
b.result = b.result[:len(b.result)-n]
}
// A MarshalingValue marshals itself into a Builder.
type MarshalingValue interface {
// Marshal is called by Builder.AddValue. It receives a pointer to a builder
