Merge remote-tracking branch 'hashicorp/main' into feature/health-checks_windows_service
Signed-off-by: Alessandro De Blasis <alex@deblasis.net>
commit 260c37f9fd
@@ -0,0 +1,3 @@
+```release-note:improvement
+api: Add filtering support to Catalog's List Services (v1/catalog/services)
+```
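For reference, this filter is exposed through the Go API client's existing `Filter` query option on the catalog endpoint. A minimal sketch, assuming a locally running agent and the `github.com/hashicorp/consul/api` package (the `redis` service name is just an illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Server-side filtering: only services matching the expression are returned.
	services, _, err := client.Catalog().Services(&api.QueryOptions{
		Filter: `ServiceName == "redis"`,
	})
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags) // e.g. redis [v1 v2]
	}
}
```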
@@ -0,0 +1,3 @@
+```release-note:bug
+cli: Fix Consul kv CLI 'GET' flags 'keys' and 'recurse' to be set together
+```

@@ -0,0 +1,3 @@
+```release-note:bug
+cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive.
+```
@@ -0,0 +1,5 @@
+```release-note:improvement
+config-entry: Validate that service-resolver `Failover`s and `Redirect`s only
+specify `Partition` and `Namespace` on Consul Enterprise. This prevents scenarios
+where OSS Consul would save service-resolvers that require Consul Enterprise.
+```
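A hypothetical sketch of the guard this describes — the names and types below are illustrative stand-ins, not Consul's actual validation helpers; the real check lives in the OSS build's config-entry validation path:

```go
package main

import "fmt"

// enterpriseFields is a stand-in for the Partition/Namespace fields that
// service-resolver Failover and Redirect blocks share.
type enterpriseFields struct {
	Partition string
	Namespace string
}

// validateOSS rejects Enterprise-only routing fields in an OSS build.
func validateOSS(block string, f enterpriseFields) error {
	if f.Partition != "" {
		return fmt.Errorf("%s: setting Partition requires Consul Enterprise", block)
	}
	if f.Namespace != "" {
		return fmt.Errorf("%s: setting Namespace requires Consul Enterprise", block)
	}
	return nil
}

func main() {
	err := validateOSS("Redirect", enterpriseFields{Namespace: "team1"})
	fmt.Println(err) // Redirect: setting Namespace requires Consul Enterprise
}
```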
@@ -0,0 +1,3 @@
+```release-note:bugfix
+rpc: Adds max jitter to client deadlines to prevent i/o deadline errors on blocking queries
+```
@@ -0,0 +1,3 @@
+```release-note:bugfix
+connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections.
+```
@@ -28,6 +28,10 @@ references:
       - "1.21.4"
       - "1.22.2"
       - "1.23.0"
+    nomad-versions: &supported_nomad_versions
+      - &default_nomad_version "1.3.3"
+      - "1.2.10"
+      - "1.1.16"
   images:
     # When updating the Go version, remember to also update the versions in the
     # workflows section for go-test-lib jobs.
@@ -105,15 +109,18 @@ commands:
         type: env_var_name
         default: ROLE_ARN
     steps:
+      # Only run the assume-role command for the main repo. The AWS credentials aren't available for forks.
       - run: |
-          export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}"
-          export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}"
-          export ROLE_ARN="${<< parameters.role-arn >>}"
-          # assume role has duration of 15 min (the minimum allowed)
-          CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')"
-          echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV
-          echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV
-          echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV
+          if [[ "${CIRCLE_BRANCH%%/*}/" != "pull/" ]]; then
+            export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}"
+            export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}"
+            export ROLE_ARN="${<< parameters.role-arn >>}"
+            # assume role has duration of 15 min (the minimum allowed)
+            CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')"
+            echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV
+            echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV
+            echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV
+          fi

   run-go-test-full:
     parameters:
@@ -560,17 +567,20 @@ jobs:
       - run: make ci.dev-docker
       - run: *notify-slack-failure

-  # Nomad 0.8 builds on go1.10
-  # Run integration tests on nomad/v0.8.7
-  nomad-integration-0_8:
+  nomad-integration-test: &NOMAD_TESTS
     docker:
-      - image: docker.mirror.hashicorp.services/cimg/go:1.10
+      - image: docker.mirror.hashicorp.services/cimg/go:1.19
+    parameters:
+      nomad-version:
+        type: enum
+        enum: *supported_nomad_versions
+        default: *default_nomad_version
     environment:
       <<: *ENVIRONMENT
       NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /home/circleci/go/src/github.com/hashicorp/nomad
-      NOMAD_VERSION: v0.8.7
+      NOMAD_VERSION: << parameters.nomad-version >>
     steps: &NOMAD_INTEGRATION_TEST_STEPS
-      - run: git clone https://github.com/hashicorp/nomad.git --branch ${NOMAD_VERSION} ${NOMAD_WORKING_DIR}
+      - run: git clone https://github.com/hashicorp/nomad.git --branch v${NOMAD_VERSION} ${NOMAD_WORKING_DIR}

       # get consul binary
       - attach_workspace:
@@ -601,16 +611,6 @@ jobs:
         path: *TEST_RESULTS_DIR
       - run: *notify-slack-failure

-  # run integration tests on nomad/main
-  nomad-integration-main:
-    docker:
-      - image: docker.mirror.hashicorp.services/cimg/go:1.18
-    environment:
-      <<: *ENVIRONMENT
-      NOMAD_WORKING_DIR: /home/circleci/go/src/github.com/hashicorp/nomad
-      NOMAD_VERSION: main
-    steps: *NOMAD_INTEGRATION_TEST_STEPS
-
   # build frontend yarn cache
   frontend-cache:
     docker:
@@ -1117,12 +1117,12 @@ workflows:
       - dev-upload-docker:
           <<: *dev-upload
           context: consul-ci
-      - nomad-integration-main:
-          requires:
-            - dev-build
-      - nomad-integration-0_8:
+      - nomad-integration-test:
           requires:
             - dev-build
+          matrix:
+            parameters:
+              nomad-version: *supported_nomad_versions
       - envoy-integration-test:
           requires:
             - dev-build
CHANGELOG.md (21 lines changed)
@@ -28,6 +28,9 @@ connect: Terminating gateways with a wildcard service entry should no longer pic
 BREAKING CHANGES:

 * config-entry: Exporting a specific service name across all namespace is invalid.
+* connect: contains an upgrade compatibility issue when restoring snapshots containing service mesh proxy registrations from pre-1.13 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)]. Fixed in 1.13.1 [[GH-14149](https://github.com/hashicorp/consul/issues/14149)]. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#all-service-mesh-deployments) for more information.
+* connect: if using auto-encrypt or auto-config, TLS is required for gRPC communication between Envoy and Consul as of 1.13.0; this TLS for gRPC requirement will be removed in a future 1.13 patch release. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more information.
+* connect: if a pre-1.13 Consul agent's HTTPS port was not enabled, upgrading to 1.13 may turn on TLS for gRPC communication for Envoy and Consul depending on the agent's TLS configuration. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#grpc-tls) for more information.
 * connect: Removes support for Envoy 1.19 [[GH-13807](https://github.com/hashicorp/consul/issues/13807)]
 * telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Before upgrading you should remove this flag from your config if the flag is being used. [[GH-13532](https://github.com/hashicorp/consul/issues/13532)]
@@ -944,6 +947,24 @@ NOTES:

 * legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive

+## 1.9.17 (April 13, 2022)
+
+SECURITY:
+
+* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)]
+* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)]
+
+DEPRECATIONS:
+
+* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-12767](https://github.com/hashicorp/consul/issues/12767)]
+
+BUG FIXES:
+
+* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)]
+* memberlist: fixes a bug which prevented members from joining a cluster with large amounts of churn [[GH-253](https://github.com/hashicorp/memberlist/issues/253)] [[GH-12046](https://github.com/hashicorp/consul/issues/12046)]
+* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)]
+
 ## 1.9.16 (February 28, 2022)

 FEATURES:
Dockerfile (21 lines changed)
@@ -22,10 +22,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
       org.opencontainers.image.url="https://www.consul.io/" \
       org.opencontainers.image.documentation="https://www.consul.io/docs" \
       org.opencontainers.image.source="https://github.com/hashicorp/consul" \
-      org.opencontainers.image.version=$VERSION \
+      org.opencontainers.image.version=${VERSION} \
       org.opencontainers.image.vendor="HashiCorp" \
      org.opencontainers.image.title="consul" \
-      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
+      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
+      version=${VERSION}

 # This is the location of the releases.
 ENV HASHICORP_RELEASES=https://releases.hashicorp.com
@@ -110,13 +111,13 @@ CMD ["agent", "-dev", "-client", "0.0.0.0"]
 # Remember, this image cannot be built locally.
 FROM docker.mirror.hashicorp.services/alpine:3.15 as default

-ARG VERSION
+ARG PRODUCT_VERSION
 ARG BIN_NAME

 # PRODUCT_NAME and PRODUCT_VERSION are the name of the software on releases.hashicorp.com
 # and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3.
 ENV BIN_NAME=$BIN_NAME
-ENV VERSION=$VERSION
+ENV PRODUCT_VERSION=$PRODUCT_VERSION

 ARG PRODUCT_REVISION
 ARG PRODUCT_NAME=$BIN_NAME
@@ -128,10 +129,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
       org.opencontainers.image.url="https://www.consul.io/" \
       org.opencontainers.image.documentation="https://www.consul.io/docs" \
       org.opencontainers.image.source="https://github.com/hashicorp/consul" \
-      org.opencontainers.image.version=$VERSION \
+      org.opencontainers.image.version=${PRODUCT_VERSION} \
       org.opencontainers.image.vendor="HashiCorp" \
       org.opencontainers.image.title="consul" \
-      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
+      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
+      version=${PRODUCT_VERSION}

 # Set up certificates and base tools.
 # libc6-compat is needed to symlink the shared libraries for ARM builds
@@ -217,10 +219,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
       org.opencontainers.image.url="https://www.consul.io/" \
       org.opencontainers.image.documentation="https://www.consul.io/docs" \
       org.opencontainers.image.source="https://github.com/hashicorp/consul" \
-      org.opencontainers.image.version=$VERSION \
+      org.opencontainers.image.version=${PRODUCT_VERSION} \
       org.opencontainers.image.vendor="HashiCorp" \
       org.opencontainers.image.title="consul" \
-      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
+      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
+      version=${PRODUCT_VERSION}

 # Copy license for Red Hat certification.
 COPY LICENSE /licenses/mozilla.txt
@@ -284,4 +287,4 @@ USER 100
 # By default you'll get an insecure single-node development server that stores
 # everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself.
 # Don't use this configuration for production.
-CMD ["agent", "-dev", "-client", "0.0.0.0"]
+CMD ["agent", "-dev", "-client", "0.0.0.0"]
@@ -2532,10 +2532,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
 		return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza")
 	}

-	// TLS is only enabled on the gRPC listener if there's an HTTPS port configured
-	// for historic and backwards-compatibility reasons.
-	if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) {
-		b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)")
+	// And UseAutoCert right now only applies to external gRPC interface.
+	if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil {
+		return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza")
 	}

 	defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion)
@@ -2592,6 +2591,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error

 	mapCommon("https", t.HTTPS, &c.HTTPS)
 	mapCommon("grpc", t.GRPC, &c.GRPC)
+	c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false)

 	c.ServerName = rt.ServerName
 	c.NodeName = rt.NodeName
@@ -868,6 +868,7 @@ type TLSProtocolConfig struct {
 	VerifyIncoming       *bool `mapstructure:"verify_incoming"`
 	VerifyOutgoing       *bool `mapstructure:"verify_outgoing"`
 	VerifyServerHostname *bool `mapstructure:"verify_server_hostname"`
+	UseAutoCert          *bool `mapstructure:"use_auto_cert"`
 }

 type TLS struct {
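The new field decodes through the same `mapstructure` tags as its neighbors, and the pointer type keeps "explicitly false" distinguishable from "unset". A self-contained sketch of that behavior, using `github.com/mitchellh/mapstructure` with a trimmed stand-in struct (not the real config type):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// protocolConfig is a trimmed stand-in for TLSProtocolConfig.
type protocolConfig struct {
	UseAutoCert *bool `mapstructure:"use_auto_cert"`
}

func main() {
	var cfg protocolConfig

	// Unset: the pointer stays nil, so a default can be applied later.
	if err := mapstructure.Decode(map[string]interface{}{}, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.UseAutoCert == nil) // true

	// Explicitly set: the pointer records the caller's choice.
	src := map[string]interface{}{"use_auto_cert": false}
	if err := mapstructure.Decode(src, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(*cfg.UseAutoCert) // false
}
```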
@@ -5550,7 +5550,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
 		},
 	})
 	run(t, testCase{
-		desc: "tls.grpc without ports.https",
+		desc: "tls.grpc.use_auto_cert defaults to false",
 		args: []string{
 			`-data-dir=` + dataDir,
 		},
+		json: []string{`
+		{
+			"tls": {
+				"grpc": {}
+			}
+		}
+		`},
+		hcl: []string{`
+		tls {
+			grpc {}
+		}
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert defaults to false (II)",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+		{
+			"tls": {}
+		}
+		`},
+		hcl: []string{`
+		tls {
+		}
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert defaults to false (III)",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+		{
+		}
+		`},
+		hcl: []string{`
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert enabled when true",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
@@ -5558,7 +5621,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
 		{
 			"tls": {
 				"grpc": {
-					"cert_file": "cert-1234"
+					"use_auto_cert": true
 				}
 			}
 		}
@@ -5566,20 +5629,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
 		hcl: []string{`
 		tls {
 			grpc {
-				cert_file = "cert-1234"
+				use_auto_cert = true
 			}
 		}
 		`},
 		expected: func(rt *RuntimeConfig) {
 			rt.DataDir = dataDir

 			rt.TLS.Domain = "consul."
 			rt.TLS.NodeName = "thehostname"

-			rt.TLS.GRPC.CertFile = "cert-1234"
+			rt.TLS.GRPC.UseAutoCert = true
 		},
-		expectedWarnings: []string{
-			"tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)",
-		},
 	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert disabled when false",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+		{
+			"tls": {
+				"grpc": {
+					"use_auto_cert": false
+				}
+			}
+		}
+		`},
+		hcl: []string{`
+		tls {
+			grpc {
+				use_auto_cert = false
+			}
+		}
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
 }
@@ -6382,6 +6468,7 @@ func TestLoad_FullConfig(t *testing.T) {
 			TLSMinVersion:  types.TLSv1_0,
 			CipherSuites:   []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA},
 			VerifyOutgoing: false,
+			UseAutoCert:    true,
 		},
 		HTTPS: tlsutil.ProtocolConfig{
 			VerifyIncoming: true,
@@ -376,7 +376,8 @@
 		"TLSMinVersion": "",
 		"VerifyIncoming": false,
 		"VerifyOutgoing": false,
-		"VerifyServerHostname": false
+		"VerifyServerHostname": false,
+		"UseAutoCert": false
 	},
 	"HTTPS": {
 		"CAFile": "",
@@ -387,7 +388,8 @@
 		"TLSMinVersion": "",
 		"VerifyIncoming": false,
 		"VerifyOutgoing": false,
-		"VerifyServerHostname": false
+		"VerifyServerHostname": false,
+		"UseAutoCert": false
 	},
 	"InternalRPC": {
 		"CAFile": "",
@@ -398,7 +400,8 @@
 		"TLSMinVersion": "",
 		"VerifyIncoming": false,
 		"VerifyOutgoing": false,
-		"VerifyServerHostname": false
+		"VerifyServerHostname": false,
+		"UseAutoCert": false
 	},
 	"NodeName": "",
 	"ServerName": ""
@@ -468,4 +471,4 @@
 	"VersionMetadata": "",
 	"VersionPrerelease": "",
 	"Watches": []
-}
+}
@@ -705,6 +705,7 @@ tls {
 		tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
 		tls_min_version = "TLSv1_0"
 		verify_incoming = true
+		use_auto_cert = true
 	}
 }
 tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
@@ -700,7 +700,8 @@
 		"key_file": "1y4prKjl",
 		"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
 		"tls_min_version": "TLSv1_0",
-		"verify_incoming": true
+		"verify_incoming": true,
+		"use_auto_cert": true
 	}
 },
 "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
@@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) {
 func TestTargetSNI(t *testing.T) {
 	// empty namespace, empty subset
 	require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
-		TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "api",
+			Partition:  "default",
+			Datacenter: "foo",
+		}), testTrustDomain1))

 	require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
-		TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "api",
+			Datacenter: "foo",
+		}), testTrustDomain1))

 	// set namespace, empty subset
 	require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2,
-		TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "api",
+			Namespace:  "neighbor",
+			Partition:  "default",
+			Datacenter: "foo",
+		}), testTrustDomain2))

 	// empty namespace, set subset
 	require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1,
-		TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", "foo"), testTrustDomain1))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:       "api",
+			ServiceSubset: "v2",
+			Partition:     "default",
+			Datacenter:    "foo",
+		}), testTrustDomain1))

 	// set namespace, set subset
 	require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2,
-		TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:       "api",
+			ServiceSubset: "canary",
+			Namespace:     "neighbor",
+			Partition:     "default",
+			Datacenter:    "foo",
+		}), testTrustDomain2))
 }
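The assertions above pin down the SNI shape: an optional subset label, then service, namespace, datacenter, and the cluster's trust-domain suffix. A hand-rolled illustration of that naming scheme (the real construction is `connect.TargetSNI`; this sketch ignores partitions and peered clusters):

```go
package main

import "fmt"

// sketchSNI mirrors the "<subset>.<service>.<namespace>.<datacenter>.<trust domain>"
// shape asserted in TestTargetSNI. Illustrative only.
func sketchSNI(subset, service, namespace, datacenter, trustDomain string) string {
	base := fmt.Sprintf("%s.%s.%s.%s", service, namespace, datacenter, trustDomain)
	if subset == "" {
		return base
	}
	return subset + "." + base
}

func main() {
	fmt.Println(sketchSNI("", "api", "default", "foo", "trust.consul"))   // api.default.foo.trust.consul
	fmt.Println(sketchSNI("v2", "api", "default", "foo", "trust.consul")) // v2.api.default.foo.trust.consul
}
```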
@@ -565,6 +565,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error {
 		return err
 	}

+	filter, err := bexpr.CreateFilter(args.Filter, nil, []*structs.ServiceNode{})
+	if err != nil {
+		return err
+	}
+
 	// Set reply enterprise metadata after resolving and validating the token so
 	// that we can properly infer metadata from the token.
 	reply.EnterpriseMeta = args.EnterpriseMeta
@@ -574,10 +579,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error {
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
 			var err error
+			var serviceNodes structs.ServiceNodes
 			if len(args.NodeMetaFilters) > 0 {
-				reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
+				reply.Index, serviceNodes, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
 			} else {
-				reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
+				reply.Index, serviceNodes, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
 			}
 			if err != nil {
 				return err
@@ -588,11 +594,43 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error {
 				return nil
 			}

+			raw, err := filter.Execute(serviceNodes)
+			if err != nil {
+				return err
+			}
+
+			reply.Services = servicesTagsByName(raw.(structs.ServiceNodes))
+
 			c.srv.filterACLWithAuthorizer(authz, reply)

 			return nil
 		})
 }

+func servicesTagsByName(services []*structs.ServiceNode) structs.Services {
+	unique := make(map[string]map[string]struct{})
+	for _, svc := range services {
+		tags, ok := unique[svc.ServiceName]
+		if !ok {
+			unique[svc.ServiceName] = make(map[string]struct{})
+			tags = unique[svc.ServiceName]
+		}
+		for _, tag := range svc.ServiceTags {
+			tags[tag] = struct{}{}
+		}
+	}
+
+	// Generate the output structure.
+	var results = make(structs.Services)
+	for service, tags := range unique {
+		results[service] = make([]string, 0, len(tags))
+		for tag := range tags {
+			results[service] = append(results[service], tag)
+		}
+	}
+	return results
+}
+
 // ServiceList is used to query the services in a DC.
 // Returns services as a list of ServiceNames.
 func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error {
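The create-then-execute pattern above comes from `github.com/hashicorp/go-bexpr`. In isolation it looks like the following sketch, with a trimmed `Svc` type standing in for `structs.ServiceNode`:

```go
package main

import (
	"fmt"
	"log"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Svc is a stand-in for structs.ServiceNode.
type Svc struct {
	ServiceName string
	ServiceTags []string
}

func main() {
	nodes := []Svc{
		{ServiceName: "redis", ServiceTags: []string{"v1", "v2"}},
		{ServiceName: "web", ServiceTags: []string{"prod"}},
	}

	// Compile the expression against the element type once...
	filter, err := bexpr.CreateFilter(`ServiceName == "redis"`, nil, []Svc{})
	if err != nil {
		log.Fatal(err)
	}
	// ...then run it over the slice; the result keeps the slice type,
	// exactly as Catalog.ListServices now does with structs.ServiceNodes.
	raw, err := filter.Execute(nodes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(raw.([]Svc)) // [{redis [v1 v2]}]
}
```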
@@ -1523,6 +1523,45 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) {
 	}
 }

+func TestCatalog_ListServices_Filter(t *testing.T) {
+	t.Parallel()
+	_, s1 := testServer(t)
+	codec := rpcClient(t, s1)
+
+	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
+
+	// prep the cluster with some data we can use in our filters
+	registerTestCatalogEntries(t, codec)
+
+	// Run the tests against the test server
+
+	t.Run("ListServices", func(t *testing.T) {
+		args := structs.DCSpecificRequest{
+			Datacenter: "dc1",
+		}
+
+		args.Filter = "ServiceName == redis"
+		out := new(structs.IndexedServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+		require.Contains(t, out.Services, "redis")
+		require.ElementsMatch(t, []string{"v1", "v2"}, out.Services["redis"])
+
+		args.Filter = "NodeMeta.os == NoSuchOS"
+		out = new(structs.IndexedServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+		require.Len(t, out.Services, 0)
+
+		args.Filter = "NodeMeta.NoSuchMetadata == linux"
+		out = new(structs.IndexedServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+		require.Len(t, out.Services, 0)
+
+		args.Filter = "InvalidField == linux"
+		out = new(structs.IndexedServices)
+		require.Error(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+	})
+}
+
 func TestCatalog_ListServices_Blocking(t *testing.T) {
 	if testing.Short() {
 		t.Skip("too slow for testing.Short")
@@ -893,8 +893,8 @@ func TestClient_RPC_Timeout(t *testing.T) {
 		}
 	})

-	// waiter will sleep for 50ms
-	require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 50 * time.Millisecond}))
+	// waiter will sleep for 101ms which is 1ms more than the DefaultQueryTime
+	require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 101 * time.Millisecond}))

 	// Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms)
 	// so we expect the RPC call to timeout.
@@ -903,7 +903,8 @@ func TestClient_RPC_Timeout(t *testing.T) {
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")

-	// Blocking requests have a longer timeout (100ms) so this should pass
+	// Blocking requests have a longer timeout (100ms) so this should pass since we
+	// add the maximum jitter which should be 16ms
 	out = struct{}{}
 	err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
 		QueryOptions: structs.QueryOptions{
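What this test exercises: the client now pads a blocking query's RPC deadline with the maximum jitter instead of timing out exactly at the query time, so a server that answers at (or just past) the wait time no longer trips "i/o deadline reached". A rough sketch of that padding — the 1/16 fraction is an assumption based on the test's comment, and Consul's real constants live in the rpc client:

```go
package main

import (
	"fmt"
	"time"
)

// blockingDeadline sketches a client-side RPC deadline for a blocking
// query: the query time plus the maximum jitter the server may add.
// The fraction here is illustrative; Consul's actual constant may differ.
func blockingDeadline(queryTime time.Duration) time.Duration {
	maxJitter := queryTime / 16
	return queryTime + maxJitter
}

func main() {
	// A waiter sleeping 101ms now fits inside the padded 100ms deadline.
	fmt.Println(blockingDeadline(100 * time.Millisecond)) // 106.25ms
}
```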
@@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
 		return &resp, nil
 	}

-	newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
-		t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
+	newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
+		if opts.Namespace == "" {
+			opts.Namespace = "default"
+		}
+		if opts.Partition == "" {
+			opts.Partition = "default"
+		}
+		if opts.Datacenter == "" {
+			opts.Datacenter = "dc1"
+		}
+		t := structs.NewDiscoveryTarget(opts)
 		t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
 		t.Name = t.SNI
 		t.ConnectTimeout = 5 * time.Second // default

@@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
+			"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
 		},
 	},
 }

@@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
 			"web.default.default.dc1": targetWithConnectTimeout(
-				newTarget("web", "", "default", "default", "dc1"),
+				newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
 				33*time.Second,
 			),
 		},
@@ -8,6 +8,7 @@ import (
 	"github.com/mitchellh/hashstructure"
 	"github.com/mitchellh/mapstructure"

+	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/configentry"
 	"github.com/hashicorp/consul/agent/connect"
 	"github.com/hashicorp/consul/agent/structs"
@@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error {
 	if router == nil {
 		// If no router is configured, move on down the line to the next hop of
 		// the chain.
-		node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", ""))
+		node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{
+			Service: c.serviceName,
+		}))
+
 		if err != nil {
 			return err
 		}
@@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error {
 		)
 		if dest.ServiceSubset == "" {
 			node, err = c.getSplitterOrResolverNode(
-				c.newTarget(svc, "", destNamespace, destPartition, ""),
-			)
+				c.newTarget(structs.DiscoveryTargetOpts{
+					Service:   svc,
+					Namespace: destNamespace,
+					Partition: destPartition,
+				},
+				))
 		} else {
 			node, err = c.getResolverNode(
-				c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""),
+				c.newTarget(structs.DiscoveryTargetOpts{
+					Service:       svc,
+					ServiceSubset: dest.ServiceSubset,
+					Namespace:     destNamespace,
+					Partition:     destPartition,
+				}),
 				false,
 			)
 		}
@@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error {

 	// If we have a router, we'll add a catch-all route at the end to send
 	// unmatched traffic to the next hop in the chain.
-	defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), ""))
+	opts := structs.DiscoveryTargetOpts{
+		Service:   router.Name,
+		Namespace: router.NamespaceOrDefault(),
+		Partition: router.PartitionOrDefault(),
+	}
+	defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts))
 	if err != nil {
 		return err
 	}
@@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.ServiceRoute {
 	}
 }

-func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
-	if service == "" {
+func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
+	if opts.Service == "" {
 		panic("newTarget called with empty service which makes no sense")
 	}

-	t := structs.NewDiscoveryTarget(
-		service,
-		serviceSubset,
-		defaultIfEmpty(namespace, c.evaluateInNamespace),
-		defaultIfEmpty(partition, c.evaluateInPartition),
-		defaultIfEmpty(datacenter, c.evaluateInDatacenter),
-	)
+	if opts.Peer == "" {
+		opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter)
+		opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace)
+		opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition)
+	} else {
+		// Don't allow Peer and Datacenter.
+		opts.Datacenter = ""
+		// Peer and Partition cannot both be set.
+		opts.Partition = acl.PartitionOrDefault("")
+		// Default to "default" rather than c.evaluateInNamespace.
+		opts.Namespace = acl.PartitionOrDefault(opts.Namespace)
+	}

-	// Set default connect SNI. This will be overridden later if the service
-	// has an explicit SNI value configured in service-defaults.
-	t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
+	t := structs.NewDiscoveryTarget(opts)

-	// Use the same representation for the name. This will NOT be overridden
-	// later.
-	t.Name = t.SNI
+	// We don't have the peer's trust domain yet so we can't construct the SNI.
+	if opts.Peer == "" {
+		// Set default connect SNI. This will be overridden later if the service
+		// has an explicit SNI value configured in service-defaults.
+		t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
+
+		// Use the same representation for the name. This will NOT be overridden
+		// later.
+		t.Name = t.SNI
+	}

 	prev, ok := c.loadedTargets[t.ID]
 	if ok {
@@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
 	return t
 }

-func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget {
-	var (
-		service2       = t.Service
-		serviceSubset2 = t.ServiceSubset
-		partition2     = t.Partition
-		namespace2     = t.Namespace
-		datacenter2    = t.Datacenter
-	)
+func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
+	mergedOpts := t.ToDiscoveryTargetOpts()

-	if service != "" && service != service2 {
-		service2 = service
+	if opts.Service != "" && opts.Service != mergedOpts.Service {
+		mergedOpts.Service = opts.Service
 		// Reset the chosen subset if we reference a service other than our own.
-		serviceSubset2 = ""
+		mergedOpts.ServiceSubset = ""
 	}
-	if serviceSubset != "" {
-		serviceSubset2 = serviceSubset
+	if opts.ServiceSubset != "" {
+		mergedOpts.ServiceSubset = opts.ServiceSubset
 	}
-	if partition != "" {
-		partition2 = partition
+	if opts.Partition != "" {
+		mergedOpts.Partition = opts.Partition
 	}
-	if namespace != "" {
-		namespace2 = namespace
+	// Only use explicit Namespace with Peer
+	if opts.Namespace != "" || opts.Peer != "" {
+		mergedOpts.Namespace = opts.Namespace
 	}
-	if datacenter != "" {
-		datacenter2 = datacenter
+	if opts.Datacenter != "" {
+		mergedOpts.Datacenter = opts.Datacenter
 	}
+	mergedOpts.Peer = opts.Peer

-	return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2)
+	return c.newTarget(mergedOpts)
 }

 func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) {
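The shape of the refactor above: `rewriteTarget` now starts from the target's own opts (`t.ToDiscoveryTargetOpts()`) and overlays only the non-empty override fields. Isolated into a stand-alone sketch — field names mirror `DiscoveryTargetOpts`, but this is not the production type:

```go
package main

import "fmt"

// targetOpts is a stand-in for structs.DiscoveryTargetOpts.
type targetOpts struct {
	Service, ServiceSubset, Namespace, Partition, Datacenter, Peer string
}

// merge overlays non-empty override fields onto base, following the same
// rules as compiler.rewriteTarget: switching services resets the subset,
// and a Peer override forces the Namespace through even when empty.
func merge(base, o targetOpts) targetOpts {
	if o.Service != "" && o.Service != base.Service {
		base.Service = o.Service
		base.ServiceSubset = "" // the old subset belonged to the old service
	}
	if o.ServiceSubset != "" {
		base.ServiceSubset = o.ServiceSubset
	}
	if o.Partition != "" {
		base.Partition = o.Partition
	}
	if o.Namespace != "" || o.Peer != "" {
		base.Namespace = o.Namespace
	}
	if o.Datacenter != "" {
		base.Datacenter = o.Datacenter
	}
	base.Peer = o.Peer
	return base
}

func main() {
	base := targetOpts{Service: "main", ServiceSubset: "v2", Datacenter: "dc1"}
	fmt.Printf("%+v\n", merge(base, targetOpts{Service: "backup"}))
	// Subset is reset because the service changed; the datacenter carries over.
}
```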
@@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGraphNode, error) {
 		// fall through to group-resolver
 	}

-	node, err := c.getResolverNode(
-		c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""),
-		false,
-	)
+	opts := structs.DiscoveryTargetOpts{
+		Service:       splitID.ID,
+		ServiceSubset: split.ServiceSubset,
+		Namespace:     splitID.NamespaceOrDefault(),
+		Partition:     splitID.PartitionOrDefault(),
+	}
+	node, err := c.getResolverNode(c.newTarget(opts), false)
 	if err != nil {
 		return nil, err
 	}
@@ -881,11 +908,7 @@ RESOLVE_AGAIN:

 	redirectedTarget := c.rewriteTarget(
 		target,
-		redirect.Service,
-		redirect.ServiceSubset,
-		redirect.Partition,
-		redirect.Namespace,
-		redirect.Datacenter,
+		redirect.ToDiscoveryTargetOpts(),
 	)
 	if redirectedTarget.ID != target.ID {
 		target = redirectedTarget
@@ -895,14 +918,9 @@ RESOLVE_AGAIN:

 	// Handle default subset.
 	if target.ServiceSubset == "" && resolver.DefaultSubset != "" {
-		target = c.rewriteTarget(
-			target,
-			"",
-			resolver.DefaultSubset,
-			"",
-			"",
-			"",
-		)
+		target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{
+			ServiceSubset: resolver.DefaultSubset,
+		})
 		goto RESOLVE_AGAIN
 	}
@@ -1027,56 +1045,54 @@ RESOLVE_AGAIN:
 		failover, ok = f["*"]
 	}

-	if ok {
-		// Determine which failover definitions apply.
-		var failoverTargets []*structs.DiscoveryTarget
-		if len(failover.Datacenters) > 0 {
-			for _, dc := range failover.Datacenters {
-				// Rewrite the target as per the failover policy.
-				failoverTarget := c.rewriteTarget(
-					target,
-					failover.Service,
-					failover.ServiceSubset,
-					target.Partition,
-					failover.Namespace,
-					dc,
-				)
-				if failoverTarget.ID != target.ID { // don't failover to yourself
-					failoverTargets = append(failoverTargets, failoverTarget)
-				}
-			}
-		} else {
+	if !ok {
+		return node, nil
+	}
+
+	// Determine which failover definitions apply.
+	var failoverTargets []*structs.DiscoveryTarget
+	if len(failover.Datacenters) > 0 {
+		opts := failover.ToDiscoveryTargetOpts()
+		for _, dc := range failover.Datacenters {
 			// Rewrite the target as per the failover policy.
-			failoverTarget := c.rewriteTarget(
-				target,
-				failover.Service,
-				failover.ServiceSubset,
-				target.Partition,
-				failover.Namespace,
-				"",
-			)
+			opts.Datacenter = dc
+			failoverTarget := c.rewriteTarget(target, opts)
 			if failoverTarget.ID != target.ID { // don't failover to yourself
 				failoverTargets = append(failoverTargets, failoverTarget)
 			}
 		}
-
-		// If we filtered everything out then no point in having a failover.
-		if len(failoverTargets) > 0 {
-			df := &structs.DiscoveryFailover{}
-			node.Resolver.Failover = df
-
-			// Take care of doing any redirects or configuration loading
-			// related to targets by cheating a bit and recursing into
-			// ourselves.
-			for _, target := range failoverTargets {
-				failoverResolveNode, err := c.getResolverNode(target, true)
-				if err != nil {
-					return nil, err
-				}
-				failoverTarget := failoverResolveNode.Resolver.Target
-				df.Targets = append(df.Targets, failoverTarget)
+	} else if len(failover.Targets) > 0 {
+		for _, t := range failover.Targets {
+			// Rewrite the target as per the failover policy.
+			failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts())
+			if failoverTarget.ID != target.ID { // don't failover to yourself
+				failoverTargets = append(failoverTargets, failoverTarget)
+			}
+		}
+	} else {
+		// Rewrite the target as per the failover policy.
+		failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts())
+		if failoverTarget.ID != target.ID { // don't failover to yourself
+			failoverTargets = append(failoverTargets, failoverTarget)
+		}
+	}
+
+	// If we filtered everything out then no point in having a failover.
+	if len(failoverTargets) > 0 {
+		df := &structs.DiscoveryFailover{}
+		node.Resolver.Failover = df
+
+		// Take care of doing any redirects or configuration loading
+		// related to targets by cheating a bit and recursing into
+		// ourselves.
+		for _, target := range failoverTargets {
+			failoverResolveNode, err := c.getResolverNode(target, true)
+			if err != nil {
+				return nil, err
 			}
+			failoverTarget := failoverResolveNode.Resolver.Target
+			df.Targets = append(df.Targets, failoverTarget)
 		}
 	}
@@ -46,6 +46,7 @@ func TestCompile(t *testing.T) {
 		"service and subset failover":            testcase_ServiceAndSubsetFailover(),
 		"datacenter failover":                    testcase_DatacenterFailover(),
 		"datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(),
+		"target failover":                        testcase_Failover_Targets(),
 		"noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(),
 		"resolver with default subset":               testcase_Resolve_WithDefaultSubset(),
 		"default resolver with external sni":         testcase_DefaultResolver_ExternalSNI(),
@@ -182,7 +183,7 @@ func testcase_JustRouterWithDefaults() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 	},
 }

@@ -244,7 +245,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 	},
 }

@@ -294,7 +295,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase {
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
 		"main.default.default.dc1": targetWithConnectTimeout(
-			newTarget("main", "", "default", "default", "dc1", nil),
+			newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 			33*time.Second,
 		),
 	},

@@ -361,7 +362,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 	},
 }

@@ -426,7 +427,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc1",
+		}, nil),
 	},
 }

@@ -498,7 +502,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase {
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
 		"main.default.default.dc1": targetWithConnectTimeout(
-			newTarget("main", "", "default", "default", "dc1", nil),
+			newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 			33*time.Second,
 		),
 	},

@@ -584,8 +588,11 @@ func testcase_RouteBypassesSplit() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
-		"bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+		"bypass.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "other",
+			ServiceSubset: "bypass",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == bypass",
 			}

@@ -638,7 +645,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 	},
 }

@@ -694,7 +701,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase {
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
 		"main.default.default.dc1": targetWithConnectTimeout(
-			newTarget("main", "", "default", "default", "dc1", nil),
+			newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 			33*time.Second,
 		),
 	},

@@ -776,12 +783,19 @@ func testcase_SubsetSplit() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+
+		"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "main",
+			ServiceSubset: "v2",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == 2",
 			}
 		}),
-		"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "main",
+			ServiceSubset: "v1",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == 1",
 			}

@@ -855,8 +869,8 @@ func testcase_ServiceSplit() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
-		"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
+		"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
+		"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
 	},
 }

@@ -935,7 +949,10 @@ func testcase_SplitBypassesSplit() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "next",
+			ServiceSubset: "bypassed",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == bypass",
 			}

@@ -973,7 +990,7 @@ func testcase_ServiceRedirect() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
+		"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
 	},
 }

@@ -1019,7 +1036,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "other",
+			ServiceSubset: "v2",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == 2",
 			}

@@ -1055,7 +1075,10 @@ func testcase_DatacenterRedirect() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil),
+		"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc9",
+		}, nil),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}

@@ -1095,7 +1118,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc9",
+		}, func(t *structs.DiscoveryTarget) {
 			t.MeshGateway = structs.MeshGatewayConfig{
 				Mode: structs.MeshGatewayModeRemote,
 			}

@@ -1134,8 +1160,8 @@ func testcase_ServiceFailover() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1":   newTarget("main", "", "default", "default", "dc1", nil),
-		"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1":   newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+		"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}

@@ -1177,8 +1203,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1":   newTarget("main", "", "default", "default", "dc1", nil),
-		"actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1":   newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+		"actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}

@@ -1220,8 +1246,8 @@ func testcase_Resolver_CircularFailover() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1":   newTarget("main", "", "default", "default", "dc1", nil),
-		"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1":   newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+		"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}

@@ -1261,8 +1287,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
-		"backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+		"backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "main",
+			ServiceSubset: "backup",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == backup",
 			}

@@ -1301,9 +1330,15 @@ func testcase_DatacenterFailover() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
-		"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil),
-		"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+		"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc2",
+		}, nil),
+		"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc4",
+		}, nil),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}

@@ -1350,17 +1385,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
 			t.MeshGateway = structs.MeshGatewayConfig{
 				Mode: structs.MeshGatewayModeRemote,
 			}
 		}),
-		"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc2",
+		}, func(t *structs.DiscoveryTarget) {
 			t.MeshGateway = structs.MeshGatewayConfig{
 				Mode: structs.MeshGatewayModeRemote,
 			}
 		}),
-		"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
+			Service:    "main",
+			Datacenter: "dc4",
+		}, func(t *structs.DiscoveryTarget) {
 			t.MeshGateway = structs.MeshGatewayConfig{
 				Mode: structs.MeshGatewayModeRemote,
 			}
 		}),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}
 }

+func testcase_Failover_Targets() compileTestCase {
+	entries := newEntries()
+
+	entries.AddProxyDefaults(&structs.ProxyConfigEntry{
+		Kind: structs.ProxyDefaults,
+		Name: structs.ProxyConfigGlobal,
+		MeshGateway: structs.MeshGatewayConfig{
+			Mode: structs.MeshGatewayModeRemote,
+		},
+	})
+
+	entries.AddResolvers(
+		&structs.ServiceResolverConfigEntry{
+			Kind: "service-resolver",
+			Name: "main",
+			Failover: map[string]structs.ServiceResolverFailover{
+				"*": {
+					Targets: []structs.ServiceResolverFailoverTarget{
+						{Datacenter: "dc3"},
+						{Service: "new-main"},
+						{Peer: "cluster-01"},
+					},
+				},
+			},
+		},
+	)
+
+	expect := &structs.CompiledDiscoveryChain{
+		Protocol:  "tcp",
+		StartNode: "resolver:main.default.default.dc1",
+		Nodes: map[string]*structs.DiscoveryGraphNode{
+			"resolver:main.default.default.dc1": {
+				Type: structs.DiscoveryGraphNodeTypeResolver,
+				Name: "main.default.default.dc1",
+				Resolver: &structs.DiscoveryResolver{
+					ConnectTimeout: 5 * time.Second,
+					Target:         "main.default.default.dc1",
+					Failover: &structs.DiscoveryFailover{
+						Targets: []string{
+							"main.default.default.dc3",
+							"new-main.default.default.dc1",
+							"main.default.default.external.cluster-01",
+						},
+					},
+				},
+			},
+		},
+		Targets: map[string]*structs.DiscoveryTarget{
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+			"main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc3",
+			}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+			"new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+			"main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
+				Service: "main",
+				Peer:    "cluster-01",
+			}, func(t *structs.DiscoveryTarget) {
+				t.SNI = ""
+				t.Name = ""
+				t.Datacenter = ""
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}

@@ -1422,7 +1545,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "main",
+			ServiceSubset: "v2",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == 2",
 			}

@@ -1452,7 +1578,7 @@ func testcase_DefaultResolver() compileTestCase {
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
 		// TODO-TARGET
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 	},
 }
 return compileTestCase{entries: entries, expect: expect}

@@ -1488,7 +1614,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
 			t.MeshGateway = structs.MeshGatewayConfig{
 				Mode: structs.MeshGatewayModeRemote,
 			}

@@ -1530,7 +1656,7 @@ func testcase_ServiceMetaProjection() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+		"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 	},
 }

@@ -1588,7 +1714,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
+		"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
 	},
 }

@@ -1623,7 +1749,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
+		"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
 	},
 }

@@ -1658,7 +1784,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
-		"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+		"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+			Service:       "main",
+			ServiceSubset: "v2",
+		}, func(t *structs.DiscoveryTarget) {
 			t.Subset = structs.ServiceResolverSubset{
 				Filter: "Service.Meta.version == 2",
 			}

@@ -1692,7 +1821,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase {
 		},
 	},
 	Targets: map[string]*structs.DiscoveryTarget{
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
|
||||
t.SNI = "main.some.other.service.mesh"
|
||||
t.External = true
|
||||
}),
|
||||
|
@ -1857,11 +1986,17 @@ func testcase_MultiDatacenterCanary() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc2": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc2", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc2",
|
||||
}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
"main.default.default.dc3": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc3", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc3",
|
||||
}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
@ -2155,27 +2290,42 @@ func testcase_AllBellsAndWhistles() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "redirected",
|
||||
ServiceSubset: "prod",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "ServiceMeta.env == prod",
|
||||
}
|
||||
}),
|
||||
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v1",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 1",
|
||||
}
|
||||
}),
|
||||
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 2",
|
||||
}
|
||||
}),
|
||||
"v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v3",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 3",
|
||||
}
|
||||
}),
|
||||
"default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "default-subset",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{OnlyPassing: true}
|
||||
}),
|
||||
},
|
||||
|
@ -2379,7 +2529,7 @@ func testcase_ResolverProtocolOverride() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect,
|
||||
|
@ -2413,7 +2563,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect,
|
||||
|
@ -2451,7 +2601,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect,
|
||||
|
@ -2685,9 +2835,9 @@ func testcase_LBSplitterAndResolver() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
|
||||
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
|
||||
"baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil),
|
||||
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
|
||||
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
|
||||
"baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2743,7 +2893,7 @@ func testcase_LBResolver() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2791,8 +2941,17 @@ func newEntries() *configentry.DiscoveryChainSet {
|
|||
}
|
||||
}
|
||||
|
||||
func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
|
||||
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
|
||||
func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
|
||||
if opts.Namespace == "" {
|
||||
opts.Namespace = "default"
|
||||
}
|
||||
if opts.Partition == "" {
|
||||
opts.Partition = "default"
|
||||
}
|
||||
if opts.Datacenter == "" {
|
||||
opts.Datacenter = "dc1"
|
||||
}
|
||||
t := structs.NewDiscoveryTarget(opts)
|
||||
t.SNI = connect.TargetSNI(t, "trustdomain.consul")
|
||||
t.Name = t.SNI
|
||||
t.ConnectTimeout = 5 * time.Second // default
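	// Callers typically set only the fields that differ from the
	// default/default/dc1 baseline; the empty fields are filled in above
	// before the target is constructed.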

@ -31,11 +31,18 @@ import (
)

var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"}
var leaderHealthyPeeringKey = []string{"consul", "peering", "healthy"}
var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
	{
		Name: leaderExportedServicesCountKey,
		Help: "A gauge that tracks how many services are exported for the peering. " +
			"The labels are \"peering\" and, for enterprise, \"partition\". " +
			"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
			"We emit this metric every 9 seconds",
	},
	{
		Name: leaderHealthyPeeringKey,
		Help: "A gauge that tracks whether a peering is healthy (1) or not (0). " +
			"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
			"We emit this metric every 9 seconds",
	},
}

@ -85,13 +92,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
	}

	for _, peer := range peers {
		status, found := s.peerStreamServer.StreamStatus(peer.ID)
		if !found {
			logger.Trace("did not find status for", "peer_name", peer.Name)
			continue
		}

		esc := status.GetExportedServicesCount()
		part := peer.Partition
		labels := []metrics.Label{
			{Name: "peer_name", Value: peer.Name},

@ -101,7 +101,25 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
			labels = append(labels, metrics.Label{Name: "partition", Value: part})
		}

		metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
		status, found := s.peerStreamServer.StreamStatus(peer.ID)
		if found {
			// exported services count metric
			esc := status.GetExportedServicesCount()
			metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
		}

		// peering health metric
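		// NaN (rather than 0) marks a peering that has never connected, so a
		// missing stream is distinguishable from an unhealthy one.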
		if status.NeverConnected {
			metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
		} else {
			healthy := status.IsHealthy()
			healthyInt := 0
			if healthy {
				healthyInt = 1
			}

			metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels)
		}
	}

	return nil

@ -277,13 +295,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
		return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
	}

	// Create a ring buffer to cycle through peer addresses in the retry loop below.
	buffer := ring.New(len(peer.PeerServerAddresses))
	for _, addr := range peer.PeerServerAddresses {
		buffer.Value = addr
		buffer = buffer.Next()
	}

	secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
	if err != nil {
		return fmt.Errorf("failed to read secret for peering: %w", err)

@ -294,27 +305,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me

	logger.Trace("establishing stream to peer")

	retryCtx, cancel := context.WithCancel(ctx)
	cancelFns[peer.ID] = cancel

	streamStatus, err := s.peerStreamTracker.Register(peer.ID)
	if err != nil {
		return fmt.Errorf("failed to register stream: %v", err)
	}

	streamCtx, cancel := context.WithCancel(ctx)
	cancelFns[peer.ID] = cancel

	// Start a goroutine to watch for updates to peer server addresses.
	// The latest valid server address can be received from nextServerAddr.
	nextServerAddr := make(chan string)
	go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr)

	// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
	go retryLoopBackoffPeering(retryCtx, logger, func() error {
	go retryLoopBackoffPeering(streamCtx, logger, func() error {
		// Try a new address on each iteration by advancing the ring buffer on errors.
		defer func() {
			buffer = buffer.Next()
		}()
		addr, ok := buffer.Value.(string)
		if !ok {
			return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
		}
		addr := <-nextServerAddr
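		// This receive blocks until the address-watching goroutine publishes
		// the next address, so each retry iteration dials the freshest
		// server address known for the peer.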

		logger.Trace("dialing peer", "addr", addr)
		conn, err := grpc.DialContext(retryCtx, addr,
		conn, err := grpc.DialContext(streamCtx, addr,
			// TODO(peering): use a grpc.WithStatsHandler here?)
			tlsOption,
			// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.

@ -331,7 +341,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
		defer conn.Close()

		client := pbpeerstream.NewPeerStreamServiceClient(conn)
		stream, err := client.StreamResources(retryCtx)
		stream, err := client.StreamResources(streamCtx)
		if err != nil {
			return err
		}

@ -379,6 +389,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
	return nil
}

// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr.
// It loads the server addresses into a ring buffer and cycles through them until:
// 1. streamCtx is cancelled (peer is deleted)
// 2. the peer is modified and the watchset fires.
//
// In case (2) we refetch the peering and rebuild the ring buffer.
func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) {
	defer close(nextServerAddr)

	// we initialize the ring buffer with the peer passed to `establishStream`
	// because the caller has pre-checked `peer.ShouldDial`, guaranteeing
	// at least one server address.
	//
	// IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block.
	ringbuf := ring.New(len(peer.PeerServerAddresses))
	for _, addr := range peer.PeerServerAddresses {
		ringbuf.Value = addr
		ringbuf = ringbuf.Next()
	}
	innerWs := memdb.NewWatchSet()
	_, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
	if err != nil {
		s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.",
			"peer_id", peer.ID,
			"error", err)
	}

	fetchAddrs := func() error {
		// reinstantiate innerWs to prevent it from growing indefinitely
		innerWs = memdb.NewWatchSet()
		_, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
		if err != nil {
			return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err)
		}
		if !peering.IsActive() {
			return fmt.Errorf("peer %q is no longer active", peer.ID)
		}
		if len(peering.PeerServerAddresses) == 0 {
			return fmt.Errorf("peer %q has no addresses to dial", peer.ID)
		}

		ringbuf = ring.New(len(peering.PeerServerAddresses))
		for _, addr := range peering.PeerServerAddresses {
			ringbuf.Value = addr
			ringbuf = ringbuf.Next()
		}
		return nil
	}

	for {
		select {
		case nextServerAddr <- ringbuf.Value.(string):
			ringbuf = ringbuf.Next()
		case err := <-innerWs.WatchCh(ctx):
			if err != nil {
				// context was cancelled
				return
			}
			// watch fired so we refetch the peering and rebuild the ring buffer
			if err := fetchAddrs(); err != nil {
				s.logger.Warn("watchset for peer was fired but failed to update server addresses",
					"peer_id", peer.ID,
					"error", err)
			}
		}
	}
}
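
// A rough sketch of how the two goroutines interact, using the names above:
// the dial loop does `addr := <-nextServerAddr` on every retry, while this
// function rotates ringbuf on each successful send. Consecutive retries
// therefore walk the address list round-robin until the watch fires (the
// list is rebuilt) or the context is cancelled (the channel is closed).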

func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
	s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}

@ -391,6 +469,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error {
	// process. This includes deletion of the peerings themselves in addition to any peering data
	raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		ws := memdb.NewWatchSet()
		state := s.fsm.State()
		_, peerings, err := s.fsm.State().PeeringListDeleted(ws)

@ -7,6 +7,7 @@ import (
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"testing"
	"time"

@ -17,6 +18,7 @@ import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/consul/state"

@ -24,6 +26,7 @@ import (
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/proto/pbpeering"
	"github.com/hashicorp/consul/sdk/freeport"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/types"

@ -974,6 +977,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
	var (
		s2PeerID1          = generateUUID()
		s2PeerID2          = generateUUID()
		s2PeerID3          = generateUUID()
		testContextTimeout = 60 * time.Second
		lastIdx            = uint64(0)
	)

@ -1063,6 +1067,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
		// mimic tracking exported services
		mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
		mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})

		// pretend that the heartbeat happened
		mst2.TrackRecvHeartbeat()
	}

	// Simulate a peering that never connects
	{
		p3 := &pbpeering.Peering{
			ID:                  s2PeerID3,
			Name:                "my-peer-s4",
			PeerID:              token.PeerID, // doesn't much matter what these values are
			PeerCAPems:          token.CA,
			PeerServerName:      token.ServerName,
			PeerServerAddresses: token.ServerAddresses,
		}
		require.True(t, p3.ShouldDial())
		lastIdx++
		require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3}))
	}

	// set up a metrics sink

@ -1092,6 +1114,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
		require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2))

		require.Equal(r, float32(2), metric2.Value) // for d, e services

		keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2)
		healthyMetric2, ok := intv.Gauges[keyHealthyMetric2]
		require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2))

		require.Equal(r, float32(1), healthyMetric2.Value)

		keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3)
		healthyMetric3, ok := intv.Gauges[keyHealthyMetric3]
		require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3))

		require.True(r, math.IsNaN(float64(healthyMetric3.Value)))
	})
}

@ -1343,3 +1377,138 @@ func Test_isFailedPreconditionErr(t *testing.T) {
	werr := fmt.Errorf("wrapped: %w", err)
	assert.True(t, isFailedPreconditionErr(werr))
}

func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// We want 1s retries for this test
	orig := maxRetryBackoff
	maxRetryBackoff = 1
	t.Cleanup(func() { maxRetryBackoff = orig })

	_, acceptor := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "acceptor"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"
	})
	testrpc.WaitForLeader(t, acceptor.RPC, "dc1")

	// Create a peering by generating a token
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	t.Cleanup(cancel)

	conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
		grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
		grpc.WithInsecure(),
		grpc.WithBlock())
	require.NoError(t, err)
	defer conn.Close()

	acceptorClient := pbpeering.NewPeeringServiceClient(conn)

	req := pbpeering.GenerateTokenRequest{
		PeerName: "my-peer-dialer",
	}
	resp, err := acceptorClient.GenerateToken(ctx, &req)
	require.NoError(t, err)

	// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
	_, dialer := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "dialer"
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc2"
	})
	testrpc.WaitForLeader(t, dialer.RPC, "dc2")

	// Create a peering at dialer by establishing a peering with acceptor's token
	ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
	t.Cleanup(cancel)

	conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
		grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
		grpc.WithInsecure(),
		grpc.WithBlock())
	require.NoError(t, err)
	defer conn.Close()

	dialerClient := pbpeering.NewPeeringServiceClient(conn)

	establishReq := pbpeering.EstablishRequest{
		PeerName:     "my-peer-acceptor",
		PeeringToken: resp.PeeringToken,
	}
	_, err = dialerClient.Establish(ctx, &establishReq)
	require.NoError(t, err)

	p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
	require.NoError(t, err)

	retry.Run(t, func(r *retry.R) {
		status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
		require.True(r, found)
		require.True(r, status.Connected)
	})

	testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
		ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
		t.Cleanup(cancel)

		// generate a new token from the acceptor
		req := pbpeering.GenerateTokenRequest{
			PeerName: "my-peer-dialer",
		}
		resp, err := acceptorClient.GenerateToken(ctx, &req)
		require.NoError(t, err)

		token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
		require.NoError(t, err)

		// we will update the token with bad addresses to assert it doesn't clobber existing ones
		token.ServerAddresses = []string{"1.2.3.4:1234"}

		badToken, err := acceptor.peeringBackend.EncodeToken(token)
		require.NoError(t, err)

		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		t.Cleanup(cancel)

		// Try establishing.
		// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
		establishReq := pbpeering.EstablishRequest{
			PeerName:     "my-peer-acceptor",
			PeeringToken: string(badToken),
		}
		_, err = dialerClient.Establish(ctx, &establishReq)
		require.NoError(t, err)

		p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
		require.NoError(t, err)
		require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
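		// If Establish had persisted the token's addresses, the bad
		// 1.2.3.4:1234 entry would now appear in the stored peering, so this
		// assertion is what proves existing addresses were not clobbered.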
	})

	testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
		// force close the acceptor's gRPC server so the dialer retries with a new address.
		acceptor.externalGRPCServer.Stop()

		clone := proto.Clone(p.Peering)
		updated := clone.(*pbpeering.Peering)
		// start with a bad address so we can assert for a specific error
		updated.PeerServerAddresses = append([]string{
			"bad",
		}, p.Peering.PeerServerAddresses...)

		// this write will wake up the watch on the leader to refetch server addresses
		require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))

		retry.Run(t, func(r *retry.R) {
			status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
			require.True(r, found)
			// We assert for this error to be set which would indicate that we iterated
			// through a bad address.
			require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address")
			require.False(r, status.Connected)
		})
	})
}

@ -159,6 +159,13 @@ func computeResolvedServiceConfig(
		thisReply.Destination = *serviceConf.Destination
	}

	if serviceConf.MaxInboundConnections > 0 {
		if thisReply.ProxyConfig == nil {
			thisReply.ProxyConfig = map[string]interface{}{}
		}
		thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
	}
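	// max_inbound_connections travels in the generic ProxyConfig map
	// (map[string]interface{}) rather than in a dedicated response field.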

	thisReply.Meta = serviceConf.Meta
}

@ -3,12 +3,60 @@ package consul

import (
	"testing"

	"github.com/hashicorp/consul/agent/configentry"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/mitchellh/copystructure"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func Test_ComputeResolvedServiceConfig(t *testing.T) {
	type args struct {
		scReq       *structs.ServiceConfigRequest
		upstreamIDs []structs.ServiceID
		entries     *configentry.ResolvedServiceConfigSet
	}

	sid := structs.ServiceID{
		ID:             "sid",
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	tests := []struct {
		name string
		args args
		want *structs.ServiceConfigResponse
	}{
		{
			name: "proxy with max_inbound_connections",
			args: args{
				scReq: &structs.ServiceConfigRequest{
					Name: "sid",
				},
				entries: &configentry.ResolvedServiceConfigSet{
					ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
						sid: {
							MaxInboundConnections: 20,
						},
					},
				},
			},
			want: &structs.ServiceConfigResponse{
				ProxyConfig: map[string]interface{}{
					"max_inbound_connections": 20,
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := computeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
				false, tt.args.entries, nil)
			require.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}

func Test_MergeServiceConfig_TransparentProxy(t *testing.T) {
	type args struct {
		defaults *structs.ServiceConfigResponse

@ -153,6 +201,12 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) {
					DestinationNamespace: "default",
					DestinationPartition: "default",
					DestinationName:      "zap",
					Config: map[string]interface{}{
						"passive_health_check": map[string]interface{}{
							"Interval":    int64(20),
							"MaxFailures": int64(4),
						},
					},
				},
			},
		},

@ -171,8 +225,8 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) {
					DestinationName: "zap",
					Config: map[string]interface{}{
						"passive_health_check": map[string]interface{}{
							"Interval":    int64(10),
							"MaxFailures": int64(2),
							"Interval":    int64(20),
							"MaxFailures": int64(4),
						},
						"protocol": "grpc",
					},

@ -742,6 +742,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
			return s.ForwardGRPC(s.grpcConnPool, info, fn)
		},
	})
	s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout)
	s.peerStreamServer.Register(s.externalGRPCServer)

	// Initialize internal gRPC server.

@ -1134,7 +1134,7 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool,
}

// Services returns all services along with a list of associated tags.
func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

@ -1148,30 +1148,11 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerNam
	}
	ws.Add(services.WatchCh())

	// Rip through the services and enumerate them and their unique set of
	// tags.
	unique := make(map[string]map[string]struct{})
	var result []*structs.ServiceNode
	for service := services.Next(); service != nil; service = services.Next() {
		svc := service.(*structs.ServiceNode)
		tags, ok := unique[svc.ServiceName]
		if !ok {
			unique[svc.ServiceName] = make(map[string]struct{})
			tags = unique[svc.ServiceName]
		}
		for _, tag := range svc.ServiceTags {
			tags[tag] = struct{}{}
		}
		result = append(result, service.(*structs.ServiceNode))
	}

	// Generate the output structure.
	var results = make(structs.Services)
	for service, tags := range unique {
		results[service] = make([]string, 0, len(tags))
		for tag := range tags {
			results[service] = append(results[service], tag)
		}
	}
	return idx, results, nil
	return idx, result, nil
}

func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) {

@ -1212,7 +1193,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta,
}

// ServicesByNodeMeta returns all services, filtered by the given node metadata.
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
	tx := s.db.Txn(false)
	defer tx.Abort()

@ -1259,8 +1240,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
	}
	allServicesCh := allServices.WatchCh()

	// Populate the services map
	unique := make(map[string]map[string]struct{})
	var result structs.ServiceNodes
	for node := nodes.Next(); node != nil; node = nodes.Next() {
		n := node.(*structs.Node)
		if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) {

@ -1274,30 +1254,11 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
		}
		ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh)

		// Rip through the services and enumerate them and their unique set of
		// tags.
		for service := services.Next(); service != nil; service = services.Next() {
			svc := service.(*structs.ServiceNode)
			tags, ok := unique[svc.ServiceName]
			if !ok {
				unique[svc.ServiceName] = make(map[string]struct{})
				tags = unique[svc.ServiceName]
			}
			for _, tag := range svc.ServiceTags {
				tags[tag] = struct{}{}
			}
			result = append(result, service.(*structs.ServiceNode))
		}
	}

	// Generate the output structure.
	var results = make(structs.Services)
	for service, tags := range unique {
		results[service] = make([]string, 0, len(tags))
		for tag := range tags {
			results[service] = append(results[service], tag)
		}
	}
	return idx, results, nil
	return idx, result, nil
}

// maxIndexForService returns the maximum Raft Index for a service

@ -1717,6 +1678,9 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent
	if err != nil {
		return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err)
	}
	if service != nil {
		service.ID = node.ID
	}

	return idx, service, nil
}

@ -12,6 +12,8 @@ import (

	"github.com/hashicorp/consul/acl"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/hashicorp/go-memdb"
	"github.com/hashicorp/go-uuid"
	"github.com/stretchr/testify/assert"

@ -270,17 +272,20 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
		require.Equal(t, uint64(2), idx)
		require.Equal(t, svcmap["redis1"], r)

		exp := svcmap["redis1"].ToServiceNode("node1")
		exp.ID = nodeID

		// lookup service by node name
		idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName)
		require.NoError(t, err)
		require.Equal(t, uint64(2), idx)
		require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn)
		require.Equal(t, exp, sn)

		// lookup service by node ID
		idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName)
		require.NoError(t, err)
		require.Equal(t, uint64(2), idx)
		require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn)
		require.Equal(t, exp, sn)

		// lookup service by invalid node
		_, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName)

@ -2102,10 +2107,13 @@ func TestStateStore_Services(t *testing.T) {
		Address: "1.1.1.1",
		Port:    1111,
	}
	ns1.EnterpriseMeta.Normalize()
	if err := s.EnsureService(2, "node1", ns1); err != nil {
		t.Fatalf("err: %s", err)
	}
	testRegisterService(t, s, 3, "node1", "dogs")
	ns1Dogs := testRegisterService(t, s, 3, "node1", "dogs")
	ns1Dogs.EnterpriseMeta.Normalize()

	testRegisterNode(t, s, 4, "node2")
	ns2 := &structs.NodeService{
		ID: "service3",

@ -2114,6 +2122,7 @@ func TestStateStore_Services(t *testing.T) {
		Address: "1.1.1.1",
		Port:    1111,
	}
	ns2.EnterpriseMeta.Normalize()
	if err := s.EnsureService(5, "node2", ns2); err != nil {
		t.Fatalf("err: %s", err)
	}

@ -2131,19 +2140,13 @@ func TestStateStore_Services(t *testing.T) {
		t.Fatalf("bad index: %d", idx)
	}

	// Verify the result. We sort the lists since the order is
	// non-deterministic (it's built using a map internally).
	expected := structs.Services{
		"redis": []string{"prod", "primary", "replica"},
		"dogs":  []string{},
	}
	sort.Strings(expected["redis"])
	for _, tags := range services {
		sort.Strings(tags)
	}
	if !reflect.DeepEqual(expected, services) {
		t.Fatalf("bad: %#v", services)
	// Verify the result.
	expected := []*structs.ServiceNode{
		ns1Dogs.ToServiceNode("node1"),
		ns1.ToServiceNode("node1"),
		ns2.ToServiceNode("node2"),
	}
	assertDeepEqual(t, expected, services, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))

	// Deleting a node with a service should fire the watch.
	if err := s.DeleteNode(6, "node1", nil, ""); err != nil {

@ -2182,6 +2185,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
		Address: "1.1.1.1",
		Port:    1111,
	}
	ns1.EnterpriseMeta.Normalize()
	if err := s.EnsureService(2, "node0", ns1); err != nil {
		t.Fatalf("err: %s", err)
	}

@ -2192,6 +2196,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
		Address: "1.1.1.1",
		Port:    1111,
	}
	ns2.EnterpriseMeta.Normalize()
	if err := s.EnsureService(3, "node1", ns2); err != nil {
		t.Fatalf("err: %s", err)
	}

@ -2206,11 +2211,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		expected := structs.Services{
			"redis": []string{"primary", "prod"},
		expected := []*structs.ServiceNode{
			ns1.ToServiceNode("node0"),
		}
		sort.Strings(res["redis"])
		require.Equal(t, expected, res)
		assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
	})

	t.Run("Get all services using the common meta value", func(t *testing.T) {

@ -2218,11 +2222,12 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		expected := structs.Services{
			"redis": []string{"primary", "prod", "replica"},
		require.Len(t, res, 2)
		expected := []*structs.ServiceNode{
			ns1.ToServiceNode("node0"),
			ns2.ToServiceNode("node1"),
		}
		sort.Strings(res["redis"])
		require.Equal(t, expected, res)
		assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
	})

	t.Run("Get an empty list for an invalid meta value", func(t *testing.T) {

@ -2230,8 +2235,8 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		expected := structs.Services{}
		require.Equal(t, expected, res)
		var expected []*structs.ServiceNode
		assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
	})

	t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) {

@ -2239,11 +2244,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		expected := structs.Services{
			"redis": []string{"primary", "prod"},
		expected := []*structs.ServiceNode{
			ns1.ToServiceNode("node0"),
		}
		sort.Strings(res["redis"])
		require.Equal(t, expected, res)
		assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
	})

	t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) {

@ -8807,3 +8811,10 @@ func setVirtualIPFlags(t *testing.T, s *Store) {
		Value: "true",
	}))
}

func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
	t.Helper()
	if diff := cmp.Diff(x, y, opts...); diff != "" {
		t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
	}
}

@ -7,12 +7,13 @@ import (
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/hashicorp/go-memdb"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/configentry"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/lib/maps"
	"github.com/hashicorp/consul/proto/pbpeering"
	"github.com/hashicorp/go-memdb"
)

const (

@ -981,7 +982,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en
		if idx > maxIdx {
			maxIdx = idx
		}
		if peering == nil || !peering.IsActive() {
		if !peering.IsActive() {
			continue
		}
		peerings = append(peerings, peering)

@ -1461,7 +1461,13 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
	}

	newTarget := func(service, serviceSubset, datacenter string) *structs.DiscoveryTarget {
		t := structs.NewDiscoveryTarget(service, serviceSubset, "default", "default", datacenter)
		t := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
			Service:       service,
			ServiceSubset: serviceSubset,
			Partition:     "default",
			Namespace:     "default",
			Datacenter:    datacenter,
		})
		t.SNI = connect.TargetSNI(t, connect.TestTrustDomain)
		t.Name = t.SNI
		t.ConnectTimeout = 5 * time.Second // default

@ -146,13 +146,13 @@ func testRegisterServiceOpts(t *testing.T, s *Store, idx uint64, nodeID, service
// testRegisterServiceWithChange registers a service and allows ensuring the consul index is updated
// even if the service already exists, when using `modifyAccordingIndex`.
// This is done by setting the transaction ID in the "version" meta, so the service will be updated if it already exists
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
	testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) *structs.NodeService {
	return testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
}

// testRegisterServiceWithChangeOpts is the same as testRegisterServiceWithChange with the addition of opts that can
// modify the service prior to writing.
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) {
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) *structs.NodeService {
	meta := make(map[string]string)
	if modifyAccordingIndex {
		meta["version"] = fmt.Sprint(idx)

@ -183,14 +183,15 @@ func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeI
		result.ServiceID != serviceID {
		t.Fatalf("bad service: %#v", result)
	}
	return svc
}

// testRegisterService registers a service with a given transaction idx.
// If the service already exists, the transaction number might not be increased.
// Use `testRegisterServiceWithChange()` if you want to perform a registration that
// ensures the transaction is updated by setting idx in the Meta of the Service.
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
	testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) *structs.NodeService {
	return testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
}

func testRegisterConnectService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {

File diff suppressed because it is too large
@ -27,8 +27,17 @@ func TestDiscoveryChainRead(t *testing.T) {
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
		t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
	newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
		if opts.Namespace == "" {
			opts.Namespace = "default"
		}
		if opts.Partition == "" {
			opts.Partition = "default"
		}
		if opts.Datacenter == "" {
			opts.Datacenter = "dc1"
		}
		t := structs.NewDiscoveryTarget(opts)
		t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
		t.Name = t.SNI
		t.ConnectTimeout = 5 * time.Second // default

@ -99,7 +108,7 @@ func TestDiscoveryChainRead(t *testing.T) {
				},
			},
			Targets: map[string]*structs.DiscoveryTarget{
				"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
				"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
			},
		}
		require.Equal(t, expect, value.Chain)

@ -144,7 +153,7 @@ func TestDiscoveryChainRead(t *testing.T) {
				},
			},
			Targets: map[string]*structs.DiscoveryTarget{
				"web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"),
				"web.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
			},
		}
		require.Equal(t, expect, value.Chain)

@ -198,7 +207,7 @@ func TestDiscoveryChainRead(t *testing.T) {
				},
			},
			Targets: map[string]*structs.DiscoveryTarget{
				"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
				"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
			},
		}
		require.Equal(t, expect, value.Chain)

@ -264,11 +273,11 @@ func TestDiscoveryChainRead(t *testing.T) {
			},
			Targets: map[string]*structs.DiscoveryTarget{
				"web.default.default.dc1": targetWithConnectTimeout(
					newTarget("web", "", "default", "default", "dc1"),
					newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
					33*time.Second,
				),
				"web.default.default.dc2": targetWithConnectTimeout(
					newTarget("web", "", "default", "default", "dc2"),
					newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
					33*time.Second,
				),
			},

@ -280,7 +289,7 @@ func TestDiscoveryChainRead(t *testing.T) {
		}))

		expectTarget_DC1 := targetWithConnectTimeout(
			newTarget("web", "", "default", "default", "dc1"),
			newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
			22*time.Second,
		)
		expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{

@ -288,7 +297,7 @@ func TestDiscoveryChainRead(t *testing.T) {
		}

		expectTarget_DC2 := targetWithConnectTimeout(
			newTarget("web", "", "default", "default", "dc2"),
			newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
			22*time.Second,
		)
		expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{

@ -1,12 +1,13 @@
package external

import (
	"time"

	middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"time"

	agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
	"github.com/hashicorp/consul/tlsutil"

@ -34,7 +35,7 @@ func NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S
			MinTime: 15 * time.Second,
		}),
	}
	if tls != nil && tls.GRPCTLSConfigured() {
	if tls != nil && tls.GRPCServerUseTLS() {
		creds := credentials.NewTLS(tls.IncomingGRPCConfig())
		opts = append(opts, grpc.Creds(creds))
	}

@ -52,13 +52,21 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G
	}

	// Build out the response
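	// Gateways have no DestinationServiceName, so for anything other than a
	// connect proxy the service's own name is reported instead.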
	var serviceName string
	if svc.ServiceKind == structs.ServiceKindConnectProxy {
		serviceName = svc.ServiceProxy.DestinationServiceName
	} else {
		serviceName = svc.ServiceName
	}

	resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{
		Service: svc.ServiceProxy.DestinationServiceName,
		Service:     serviceName,
		Partition:   svc.EnterpriseMeta.PartitionOrDefault(),
		Namespace:   svc.EnterpriseMeta.NamespaceOrDefault(),
		Datacenter:  s.Datacenter,
		ServiceKind: convertToResponseServiceKind(svc.ServiceKind),
		NodeName:    svc.Node,
		NodeId:      string(svc.ID),
	}

	bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config)

@ -97,14 +97,20 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
		resp, err := client.GetEnvoyBootstrapParams(ctx, req)
		require.NoError(t, err)

		require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
		if tc.registerReq.Service.IsGateway() {
			require.Equal(t, tc.registerReq.Service.Service, resp.Service)
		} else {
			require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
		}

		require.Equal(t, serverDC, resp.Datacenter)
		require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition)
		require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace)
		require.Contains(t, resp.Config.Fields, proxyConfigKey)
		require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey])
		require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind)

		require.Equal(t, tc.registerReq.Node, resp.NodeName)
		require.Equal(t, string(tc.registerReq.ID), resp.NodeId)
	}

	testCases := []testCase{

@ -42,8 +42,8 @@ type Config struct {
	// outgoingHeartbeatInterval is how often we send a heartbeat.
	outgoingHeartbeatInterval time.Duration

	// incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
	incomingHeartbeatTimeout time.Duration
	// IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
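	// It is exported so that code outside this package (for example the
	// consul server, which reads it to set the tracker's heartbeat timeout)
	// can access it.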
|
||||
IncomingHeartbeatTimeout time.Duration
|
||||
}
|
||||
|
||||
//go:generate mockery --name ACLResolver --inpackage
|
||||
|
@ -63,8 +63,8 @@ func NewServer(cfg Config) *Server {
|
|||
if cfg.outgoingHeartbeatInterval == 0 {
|
||||
cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
|
||||
}
|
||||
if cfg.incomingHeartbeatTimeout == 0 {
|
||||
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
|
||||
if cfg.IncomingHeartbeatTimeout == 0 {
|
||||
cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
|
||||
}
|
||||
return &Server{
|
||||
Config: cfg,
|
||||
|
|
|
@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
|||
|
||||
// incomingHeartbeatCtx will complete if incoming heartbeats time out.
|
||||
incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
|
||||
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
|
||||
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
|
||||
// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
|
||||
// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
|
||||
// value, not the current value.
|
||||
|
@ -605,7 +605,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
|||
// They just can't trace the execution properly for some reason (possibly golang/go#29587).
|
||||
//nolint:govet
|
||||
incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
|
||||
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
|
||||
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
|
||||
}
|
||||
|
||||
case update := <-subCh:
|
||||
|
@ -642,6 +642,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
|||
if err := streamSend(replResp); err != nil {
|
||||
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
|
||||
}
|
||||
status.TrackSendSuccess()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -572,7 +572,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
})
|
||||
})
|
||||
|
||||
var lastSendSuccess time.Time
|
||||
var lastSendAck, lastSendSuccess time.Time
|
||||
|
||||
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
|
||||
ack := &pbpeerstream.ReplicationMessage{
|
||||
|
@ -587,19 +587,22 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
lastSendSuccess = it.FutureNow(1)
|
||||
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC)
|
||||
lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC)
|
||||
err := client.Send(ack)
|
||||
require.NoError(t, err)
|
||||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSendSuccess,
|
||||
Connected: true,
|
||||
LastAck: lastSendAck,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, ok := srv.StreamStatus(testPeerID)
|
||||
rStatus, ok := srv.StreamStatus(testPeerID)
|
||||
require.True(r, ok)
|
||||
require.Equal(r, expect, status)
|
||||
require.Equal(r, expect, rStatus)
|
||||
})
|
||||
})
|
||||
|
||||
|

@@ -621,23 +624,26 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			},
 		}

-		lastNack = it.FutureNow(1)
+		lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC)
+		lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC)
 		err := client.Send(nack)
 		require.NoError(t, err)

 		lastNackMsg = "client peer was unable to apply resource: bad bad not good"

 		expect := Status{
-			Connected:       true,
-			LastAck:         lastSendSuccess,
-			LastNack:        lastNack,
-			LastNackMessage: lastNackMsg,
+			Connected:        true,
+			LastAck:          lastSendAck,
+			LastNack:         lastNack,
+			LastNackMessage:  lastNackMsg,
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
+			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {
-			status, ok := srv.StreamStatus(testPeerID)
+			rStatus, ok := srv.StreamStatus(testPeerID)
 			require.True(r, ok)
-			require.Equal(r, expect, status)
+			require.Equal(r, expect, rStatus)
 		})
 	})

@@ -694,13 +700,15 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {

 		expect := Status{
 			Connected:               true,
-			LastAck:                 lastSendSuccess,
+			LastAck:                 lastSendAck,
 			LastNack:                lastNack,
 			LastNackMessage:         lastNackMsg,
 			LastRecvResourceSuccess: lastRecvResourceSuccess,
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
+			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -753,7 +761,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {

 		expect := Status{
 			Connected:               true,
-			LastAck:                 lastSendSuccess,
+			LastAck:                 lastSendAck,
 			LastNack:                lastNack,
 			LastNackMessage:         lastNackMsg,
 			LastRecvResourceSuccess: lastRecvResourceSuccess,

@@ -762,6 +770,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
+			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -785,7 +795,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {

 		expect := Status{
 			Connected:               true,
-			LastAck:                 lastSendSuccess,
+			LastAck:                 lastSendAck,
 			LastNack:                lastNack,
 			LastNackMessage:         lastNackMsg,
 			LastRecvResourceSuccess: lastRecvResourceSuccess,

@@ -795,6 +805,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
+			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -816,7 +828,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 		expect := Status{
 			Connected:              false,
 			DisconnectErrorMessage: lastRecvErrorMsg,
-			LastAck:                lastSendSuccess,
+			LastAck:                lastSendAck,
 			LastNack:               lastNack,
 			LastNackMessage:        lastNackMsg,
 			DisconnectTime:         disconnectTime,

@@ -827,6 +839,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
+			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -1129,7 +1143,7 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {

 	srv, store := newTestServer(t, func(c *Config) {
 		c.Tracker.SetClock(it.Now)
-		c.incomingHeartbeatTimeout = 5 * time.Millisecond
+		c.IncomingHeartbeatTimeout = 5 * time.Millisecond
 	})

 	p := writePeeringToBeDialed(t, store, 1, "my-peer")

@@ -1236,7 +1250,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {

 	srv, store := newTestServer(t, func(c *Config) {
 		c.Tracker.SetClock(it.Now)
-		c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
+		c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout
 	})

 	p := writePeeringToBeDialed(t, store, 1, "my-peer")

@@ -16,6 +16,8 @@ type Tracker struct {

 	// timeNow is a shim for testing.
 	timeNow func() time.Time
+
+	heartbeatTimeout time.Duration
 }

 func NewTracker() *Tracker {

@@ -33,6 +35,12 @@ func (t *Tracker) SetClock(clock func() time.Time) {
 	}
 }

+func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.heartbeatTimeout = heartbeatTimeout
+}
+
 // Register a stream for a given peer but do not mark it as connected.
 func (t *Tracker) Register(id string) (*MutableStatus, error) {
 	t.mu.Lock()

@@ -44,7 +52,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) {
 func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) {
 	status, ok := t.streams[id]
 	if !ok {
-		status = newMutableStatus(t.timeNow, initAsConnected)
+		status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected)
 		t.streams[id] = status
 		return status, true, nil
 	}

@@ -101,7 +109,9 @@ func (t *Tracker) StreamStatus(id string) (resp Status, found bool) {

 	s, ok := t.streams[id]
 	if !ok {
-		return Status{}, false
+		return Status{
+			NeverConnected: true,
+		}, false
 	}
 	return s.GetStatus(), true
 }

@@ -142,9 +152,14 @@ type MutableStatus struct {
 // Status contains information about the replication stream to a peer cluster.
 // TODO(peering): There's a lot of fields here...
 type Status struct {
+	heartbeatTimeout time.Duration
+
 	// Connected is true when there is an open stream for the peer.
 	Connected bool

+	// NeverConnected is true for peerings that have never connected, false otherwise.
+	NeverConnected bool
+
 	// DisconnectErrorMessage tracks the error that caused the stream to disconnect non-gracefully.
 	// If the stream is connected or it disconnected gracefully it will be empty.
 	DisconnectErrorMessage string

@@ -167,6 +182,9 @@ type Status struct {
 	// LastSendErrorMessage tracks the last error message when sending into the stream.
 	LastSendErrorMessage string

+	// LastSendSuccess tracks the time of the last success response sent into the stream.
+	LastSendSuccess time.Time
+
 	// LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
 	LastRecvHeartbeat time.Time

@@ -196,10 +214,40 @@ func (s *Status) GetExportedServicesCount() uint64 {
 	return uint64(len(s.ExportedServices))
 }

-func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
+// IsHealthy is a convenience func that returns true/false for a peering status.
+// We define a peering as unhealthy if its status satisfies one of the following:
+// - If a heartbeat hasn't been received within the IncomingHeartbeatTimeout
+// - If the last sent error is newer than the last sent success
+// - If the last received error is newer than the last received success
+// If none of these conditions apply, we call the peering healthy.
+func (s *Status) IsHealthy() bool {
+	if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout {
+		// 1. If a heartbeat hasn't been received for a while - report unhealthy
+		return false
+	}
+
+	if s.LastSendError.After(s.LastSendSuccess) {
+		// 2. If the last sent error is newer than the last sent success - report unhealthy
+		return false
+	}
+
+	if s.LastRecvError.After(s.LastRecvResourceSuccess) {
+		// 3. If the last recv error is newer than the last recv success - report unhealthy
+		return false
+	}
+
+	return true
+}
+
+func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus {
+	if heartbeatTimeout.Microseconds() == 0 {
+		heartbeatTimeout = defaultIncomingHeartbeatTimeout
+	}
 	return &MutableStatus{
 		Status: Status{
-			Connected: connected,
+			Connected:        connected,
+			heartbeatTimeout: heartbeatTimeout,
+			NeverConnected:   !connected,
 		},
 		timeNow: now,
 		doneCh:  make(chan struct{}),

@@ -223,6 +271,12 @@ func (s *MutableStatus) TrackSendError(error string) {
 	s.mu.Unlock()
 }

+func (s *MutableStatus) TrackSendSuccess() {
+	s.mu.Lock()
+	s.LastSendSuccess = s.timeNow().UTC()
+	s.mu.Unlock()
+}
+
 // TrackRecvResourceSuccess tracks receiving a replicated resource.
 func (s *MutableStatus) TrackRecvResourceSuccess() {
 	s.mu.Lock()
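
The health rules above reduce to three timestamp comparisons. A runnable sketch of the same decision logic over a stand-in struct (the real fields are shown in the diff; the type below is hypothetical, kept only so the example compiles on its own):

```go
package main

import (
	"fmt"
	"time"
)

// peerStatus mirrors the fields IsHealthy consults; it is an illustrative
// stand-in, not the real peerstream.Status type.
type peerStatus struct {
	lastRecvHeartbeat              time.Time
	lastSendError, lastSendSuccess time.Time
	lastRecvError, lastRecvSuccess time.Time
	heartbeatTimeout               time.Duration
}

// isHealthy applies the same three rules as Status.IsHealthy above.
func (s peerStatus) isHealthy() bool {
	if time.Since(s.lastRecvHeartbeat) > s.heartbeatTimeout {
		return false // heartbeat overdue
	}
	if s.lastSendError.After(s.lastSendSuccess) {
		return false // most recent send failed
	}
	if s.lastRecvError.After(s.lastRecvSuccess) {
		return false // most recent receive failed
	}
	return true
}

func main() {
	now := time.Now()
	s := peerStatus{
		lastRecvHeartbeat: now,
		lastSendSuccess:   now,
		lastRecvSuccess:   now,
		heartbeatTimeout:  2 * time.Minute,
	}
	fmt.Println(s.isHealthy()) // true

	s.lastSendError = now.Add(time.Second) // an error newer than the last success
	fmt.Println(s.isHealthy())             // false
}
```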

@@ -10,6 +10,97 @@ import (
 	"github.com/hashicorp/consul/sdk/testutil"
 )

+const (
+	aPeerID = "63b60245-c475-426b-b314-4588d210859d"
+)
+
+func TestStatus_IsHealthy(t *testing.T) {
+	type testcase struct {
+		name             string
+		dontConnect      bool
+		modifierFunc     func(status *MutableStatus)
+		expectedVal      bool
+		heartbeatTimeout time.Duration
+	}
+
+	tcs := []testcase{
+		{
+			name:        "never connected, unhealthy",
+			expectedVal: false,
+			dontConnect: true,
+		},
+		{
+			name:        "no heartbeat, unhealthy",
+			expectedVal: false,
+		},
+		{
+			name:        "heartbeat is not received, unhealthy",
+			expectedVal: false,
+			modifierFunc: func(status *MutableStatus) {
+				// set heartbeat
+				status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second)
+			},
+			heartbeatTimeout: 1 * time.Second,
+		},
+		{
+			name:        "send error before send success",
+			expectedVal: false,
+			modifierFunc: func(status *MutableStatus) {
+				// set heartbeat
+				status.LastRecvHeartbeat = time.Now()

+				status.LastSendSuccess = time.Now()
+				status.LastSendError = time.Now()
+			},
+		},
+		{
+			name:        "received error before received success",
+			expectedVal: false,
+			modifierFunc: func(status *MutableStatus) {
+				// set heartbeat
+				status.LastRecvHeartbeat = time.Now()

+				status.LastRecvResourceSuccess = time.Now()
+				status.LastRecvError = time.Now()
+			},
+		},
+		{
+			name:        "healthy",
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				// set heartbeat
+				status.LastRecvHeartbeat = time.Now()
+			},
+		},
+	}
+
+	for _, tc := range tcs {
+		t.Run(tc.name, func(t *testing.T) {
+			tracker := NewTracker()
+			if tc.heartbeatTimeout.Microseconds() != 0 {
+				tracker.SetHeartbeatTimeout(tc.heartbeatTimeout)
+			}
+
+			if !tc.dontConnect {
+				st, err := tracker.Connected(aPeerID)
+				require.NoError(t, err)
+				require.True(t, st.Connected)
+
+				if tc.modifierFunc != nil {
+					tc.modifierFunc(st)
+				}
+
+				require.Equal(t, tc.expectedVal, st.IsHealthy())
+			} else {
+				st, found := tracker.StreamStatus(aPeerID)
+				require.False(t, found)
+				require.Equal(t, tc.expectedVal, st.IsHealthy())
+			}
+		})
+	}
+}
+
 func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 	tracker := NewTracker()
 	peerID := "63b60245-c475-426b-b314-4588d210859d"

@@ -29,7 +120,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 	require.NoError(t, err)

 	expect := Status{
-		Connected: true,
+		Connected:        true,
+		heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 	}

 	status, ok := tracker.StreamStatus(peerID)

@@ -55,8 +147,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {

 		lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC()
 		expect := Status{
-			Connected: true,
-			LastAck:   lastSuccess,
+			Connected:        true,
+			LastAck:          lastSuccess,
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 		require.Equal(t, expect, status)
 	})

@@ -66,9 +159,10 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 		sequence++

 		expect := Status{
-			Connected:      false,
-			DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
-			LastAck:        lastSuccess,
+			Connected:        false,
+			DisconnectTime:   it.base.Add(time.Duration(sequence) * time.Second).UTC(),
+			LastAck:          lastSuccess,
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 		status, ok := tracker.StreamStatus(peerID)
 		require.True(t, ok)

@@ -80,8 +174,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 		require.NoError(t, err)

 		expect := Status{
-			Connected: true,
-			LastAck:   lastSuccess,
+			Connected:        true,
+			LastAck:          lastSuccess,
+			heartbeatTimeout: defaultIncomingHeartbeatTimeout,

 			// DisconnectTime gets cleared on re-connect.
 		}

@@ -96,7 +191,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {

 	status, ok := tracker.StreamStatus(peerID)
 	require.False(t, ok)
-	require.Zero(t, status)
+	require.Equal(t, Status{NeverConnected: true}, status)
 	})
 }

@@ -124,15 +124,21 @@ func (c *cacheProxyDataSource[ReqType]) Notify(

 func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback {
 	return func(ctx context.Context, e cache.UpdateEvent) {
-		u := proxycfg.UpdateEvent{
-			CorrelationID: e.CorrelationID,
-			Result:        e.Result,
-			Err:           e.Err,
-		}
-
 		select {
-		case ch <- u:
+		case ch <- newUpdateEvent(e.CorrelationID, e.Result, e.Err):
 		case <-ctx.Done():
 		}
 	}
 }

+func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent {
+	// This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError.
+	if acl.IsErrNotFound(err) {
+		err = proxycfg.TerminalError(err)
+	}
+	return proxycfg.UpdateEvent{
+		CorrelationID: correlationID,
+		Result:        result,
+		Err:           err,
+	}
+}

@@ -54,13 +54,8 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi

 func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
 	return func(ctx context.Context, correlationID string, result ResultType, err error) {
-		event := proxycfg.UpdateEvent{
-			CorrelationID: correlationID,
-			Result:        result,
-			Err:           err,
-		}
 		select {
-		case ch <- event:
+		case ch <- newUpdateEvent(correlationID, result, err):
 		case <-ctx.Done():
 		}
 	}

@@ -39,12 +39,8 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
 		QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token},
 	}
 	return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) {
-		e := proxycfg.UpdateEvent{
-			CorrelationID: correlationID,
-			Err:           event.Err,
-		}
-
-		if e.Err == nil {
+		var result any
+		if event.Err == nil {
 			rsp, ok := event.Result.(*structs.IndexedIntentionMatches)
 			if !ok {
 				return

@@ -54,11 +50,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
 			if len(rsp.Matches) != 0 {
 				matches = rsp.Matches[0]
 			}
-			e.Result = matches
+			result = matches
 		}

 		select {
-		case ch <- e:
+		case ch <- newUpdateEvent(correlationID, result, event.Err):
 		case <-ctx.Done():
 		}
 	})

@@ -110,10 +106,7 @@ func (s *serverIntentions) Notify(ctx context.Context, req *structs.ServiceSpeci

 		sort.Sort(structs.IntentionPrecedenceSorter(intentions))

-		return proxycfg.UpdateEvent{
-			CorrelationID: correlationID,
-			Result:        intentions,
-		}, true
+		return newUpdateEvent(correlationID, intentions, nil), true
 	}

 	for subjectIdx, subject := range subjects {

@@ -2,6 +2,7 @@ package proxycfg

 import (
 	"context"
+	"errors"

 	cachetype "github.com/hashicorp/consul/agent/cache-types"
 	"github.com/hashicorp/consul/agent/structs"

@@ -15,6 +16,28 @@ type UpdateEvent struct {
 	Err error
 }

+// TerminalError wraps the given error to indicate that the data source is in
+// an irrecoverably broken state (e.g. because the given ACL token has been
+// deleted).
+//
+// Setting UpdateEvent.Err to a TerminalError causes all watches to be canceled
+// which, in turn, terminates the xDS streams.
+func TerminalError(err error) error {
+	return terminalError{err}
+}
+
+// IsTerminalError returns whether the given error indicates that the data
+// source is in an irrecoverably broken state so watches should be torn down
+// and retried at a higher level.
+func IsTerminalError(err error) bool {
+	return errors.As(err, &terminalError{})
+}
+
+type terminalError struct{ err error }
+
+func (e terminalError) Error() string { return e.err.Error() }
+func (e terminalError) Unwrap() error { return e.err }
+
 // DataSources contains the dependencies used to consume data used to configure
 // proxies.
 type DataSources struct {
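
The wrapper relies only on the standard errors package: Unwrap keeps the original error reachable, and errors.As detects the marker anywhere in a wrap chain. A self-contained sketch of that behavior (the type is redeclared locally so the example compiles on its own):

```go
package main

import (
	"errors"
	"fmt"
)

// terminalError mirrors the unexported wrapper above; in Consul the real
// wrapping lives in the proxycfg package behind TerminalError/IsTerminalError.
type terminalError struct{ err error }

func (e terminalError) Error() string { return e.err.Error() }
func (e terminalError) Unwrap() error { return e.err }

func main() {
	base := errors.New("ACL not found")
	wrapped := error(terminalError{base})

	// errors.As reports whether any error in the chain is terminal.
	fmt.Println(errors.As(wrapped, &terminalError{}))                          // true
	fmt.Println(errors.As(fmt.Errorf("wrap: %w", wrapped), &terminalError{})) // true
	fmt.Println(errors.As(base, &terminalError{}))                            // false

	// Unwrap preserves the original error for callers that need it.
	fmt.Println(errors.Is(wrapped, base)) // true
}
```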

@@ -127,7 +127,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
 		}

 		// We are updating the proxy, close its old state
-		state.Close()
+		state.Close(false)
 	}

 	// TODO: move to a function that translates ManagerConfig->stateConfig

@@ -148,14 +148,13 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
 		return err
 	}

-	ch, err := state.Watch()
-	if err != nil {
+	if _, err = state.Watch(); err != nil {
 		return err
 	}
 	m.proxies[id] = state

 	// Start a goroutine that will wait for changes and broadcast them to watchers.
-	go m.notifyBroadcast(ch)
+	go m.notifyBroadcast(id, state)
 	return nil
 }

@@ -175,8 +174,8 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
 	}

 	// Closing state will let the goroutine we started in Register finish since
-	// watch chan is closed.
-	state.Close()
+	// watch chan is closed
+	state.Close(false)
 	delete(m.proxies, id)

 	// We intentionally leave potential watchers hanging here - there is no new

@@ -186,11 +185,17 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
 	// cleaned up naturally.
 }

-func (m *Manager) notifyBroadcast(ch <-chan ConfigSnapshot) {
-	// Run until ch is closed
-	for snap := range ch {
+func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) {
+	// Run until ch is closed (by a defer in state.run).
+	for snap := range state.snapCh {
 		m.notify(&snap)
 	}
+
+	// If state.run exited because of an irrecoverable error, close all of the
+	// watchers so that the consumers reconnect/retry at a higher level.
+	if state.failed() {
+		m.closeAllWatchers(proxyID)
+	}
 }

 func (m *Manager) notify(snap *ConfigSnapshot) {

@@ -281,6 +286,20 @@ func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) {
 	}
 }

+func (m *Manager) closeAllWatchers(proxyID ProxyID) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	watchers, ok := m.watchers[proxyID]
+	if !ok {
+		return
+	}
+
+	for watchID := range watchers {
+		m.closeWatchLocked(proxyID, watchID)
+	}
+}
+
 // closeWatchLocked cleans up state related to a single watcher. It assumes the
 // lock is held.
 func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) {

@@ -309,7 +328,7 @@ func (m *Manager) Close() error {

 	// Then close all states
 	for proxyID, state := range m.proxies {
-		state.Close()
+		state.Close(false)
 		delete(m.proxies, proxyID)
 	}
 	return nil

@@ -63,22 +63,29 @@ func NewUpstreamIDFromServiceID(sid structs.ServiceID) UpstreamID {
 	return id
 }

-// TODO(peering): confirm we don't need peername here
 func NewUpstreamIDFromTargetID(tid string) UpstreamID {
-	// Drop the leading subset if one is present in the target ID.
-	separators := strings.Count(tid, ".")
-	if separators > 3 {
-		prefix := tid[:strings.Index(tid, ".")+1]
-		tid = strings.TrimPrefix(tid, prefix)
+	var id UpstreamID
+	split := strings.Split(tid, ".")
+
+	switch {
+	case split[len(split)-2] == "external":
+		id = UpstreamID{
+			Name:           split[0],
+			EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
+			Peer:           split[4],
+		}
+	case len(split) == 5:
+		// Drop the leading subset if one is present in the target ID.
+		split = split[1:]
+		fallthrough
+	default:
+		id = UpstreamID{
+			Name:           split[0],
+			EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
+			Datacenter:     split[3],
+		}
 	}

-	split := strings.SplitN(tid, ".", 4)
-
-	id := UpstreamID{
-		Name:           split[0],
-		EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
-		Datacenter:     split[3],
-	}
 	id.normalize()
 	return id
 }
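
A runnable sketch of the target-ID parsing above, assuming the ID shapes "[subset.]service.namespace.partition.datacenter" and "service.namespace.partition.external.peer"; the helper name is illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// parseTargetID sketches the branching above: peered targets end in
// "external.<peer>", datacenter targets end in the datacenter name, and a
// five-part non-peered ID carries a leading subset that gets dropped.
func parseTargetID(tid string) (name, namespace, partition, datacenter, peer string) {
	split := strings.Split(tid, ".")
	switch {
	case len(split) >= 5 && split[len(split)-2] == "external":
		return split[0], split[1], split[2], "", split[4]
	case len(split) == 5:
		split = split[1:] // drop the leading subset
		fallthrough
	default:
		return split[0], split[1], split[2], split[3], ""
	}
}

func main() {
	for _, tid := range []string{
		"foo.default.default.dc2",            // plain target
		"v2.foo.default.default.dc2",         // subset target
		"foo.default.default.external.peer1", // peered target
	} {
		name, ns, ap, dc, peer := parseTargetID(tid)
		fmt.Println(name, ns, ap, dc, peer)
	}
}
```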

@@ -35,6 +35,13 @@ func TestUpstreamIDFromTargetID(t *testing.T) {
 				Datacenter: "dc2",
 			},
 		},
+		"peered": {
+			tid: "foo.default.default.external.cluster-01",
+			expect: UpstreamID{
+				Name: "foo",
+				Peer: "cluster-01",
+			},
+		},
 	}

 	for name, tc := range cases {

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net"
 	"reflect"
+	"sync/atomic"
 	"time"

 	"github.com/hashicorp/go-hclog"

@@ -70,11 +71,21 @@ type state struct {
 	// in Watch.
 	cancel func()

+	// failedFlag is (atomically) set to 1 (by Close) when run exits because a data
+	// source is in an irrecoverable state. It can be read with failed.
+	failedFlag int32
+
 	ch     chan UpdateEvent
 	snapCh chan ConfigSnapshot
 	reqCh  chan chan *ConfigSnapshot
 }

+// failed returns whether run exited because a data source is in an
+// irrecoverable state.
+func (s *state) failed() bool {
+	return atomic.LoadInt32(&s.failedFlag) == 1
+}
+
 type DNSConfig struct {
 	Domain    string
 	AltDomain string

@@ -250,10 +261,13 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) {
 }

 // Close discards the state and stops any long-running watches.
-func (s *state) Close() error {
+func (s *state) Close(failed bool) error {
 	if s.cancel != nil {
 		s.cancel()
 	}
+	if failed {
+		atomic.StoreInt32(&s.failedFlag, 1)
+	}
 	return nil
 }

@@ -300,7 +314,13 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
 		case <-ctx.Done():
 			return
 		case u := <-s.ch:
-			s.logger.Trace("A blocking query returned; handling snapshot update", "correlationID", u.CorrelationID)
+			s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID)
+
+			if IsTerminalError(u.Err) {
+				s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID)
+				s.Close(true)
+				return
+			}

 			if err := s.handler.handleUpdate(ctx, u, snap); err != nil {
 				s.logger.Error("Failed to handle update from watch",
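
The failedFlag handshake above spans two goroutines: run marks the state failed via Close(true), and the broadcaster checks failed() once the snapshot channel drains. A simplified, runnable sketch of that handshake (the explicit channel close here stands in for the defer in state.run; names are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// worker sketches the pattern: the producer sets an atomic flag before the
// snapshot channel closes, so the consumer can tell a clean shutdown from a
// terminal failure after its range loop ends.
type worker struct {
	failedFlag int32
	snapCh     chan int
}

func (w *worker) close(failed bool) {
	if failed {
		atomic.StoreInt32(&w.failedFlag, 1)
	}
	close(w.snapCh)
}

func (w *worker) failed() bool { return atomic.LoadInt32(&w.failedFlag) == 1 }

func main() {
	w := &worker{snapCh: make(chan int, 1)}
	go func() {
		w.snapCh <- 1
		w.close(true) // simulate a terminal data-source error
	}()

	for snap := range w.snapCh {
		fmt.Println("snapshot", snap)
	}
	if w.failed() {
		fmt.Println("closing all watchers so consumers retry")
	}
}
```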

@@ -8,7 +8,6 @@ import (
 	"time"

 	"github.com/armon/go-metrics"
-	"github.com/hashicorp/consul/proto/pbpeerstream"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-multierror"

@@ -27,6 +26,7 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/proto/pbpeering"
+	"github.com/hashicorp/consul/proto/pbpeerstream"
 )

 var (

@@ -379,6 +379,7 @@ func (s *Server) Establish(
 	}

 	var id string
+	serverAddrs := tok.ServerAddresses
 	if existing == nil {
 		id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID)
 		if err != nil {

@@ -386,6 +387,11 @@ func (s *Server) Establish(
 		}
 	} else {
 		id = existing.ID
+		// If there is a connected stream, assume that the existing ServerAddresses
+		// are up to date and do not try to overwrite them with the token's addresses.
+		if status, ok := s.Tracker.StreamStatus(id); ok && status.Connected {
+			serverAddrs = existing.PeerServerAddresses
+		}
 	}

 	// validate that this peer name is not being used as an acceptor already

@@ -397,7 +403,7 @@ func (s *Server) Establish(
 		ID:                  id,
 		Name:                req.PeerName,
 		PeerCAPems:          tok.CA,
-		PeerServerAddresses: tok.ServerAddresses,
+		PeerServerAddresses: serverAddrs,
 		PeerServerName:      tok.ServerName,
 		PeerID:              tok.PeerID,
 		Meta:                req.Meta,

@@ -418,9 +424,9 @@ func (s *Server) Establish(
 	}
 	var exchangeResp *pbpeerstream.ExchangeSecretResponse

-	// Loop through the token's addresses once, attempting to fetch the long-lived stream secret.
+	// Loop through the known server addresses once, attempting to fetch the long-lived stream secret.
 	var dialErrors error
-	for _, addr := range peering.PeerServerAddresses {
+	for _, addr := range serverAddrs {
 		exchangeResp, err = exchangeSecret(ctx, addr, tlsOption, &exchangeReq)
 		if err != nil {
 			dialErrors = multierror.Append(dialErrors, fmt.Errorf("failed to exchange peering secret with %q: %w", addr, err))

@@ -720,7 +726,7 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
 		return nil, err
 	}

-	if existing == nil || !existing.IsActive() {
+	if !existing.IsActive() {
 		// Return early when the Peering doesn't exist or is already marked for deletion.
 		// We don't return nil because the pb will fail to marshal.
 		return &pbpeering.PeeringDeleteResponse{}, nil
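
The address-selection rule added to Establish can be read as a small pure function: prefer the addresses already stored for the peering while a stream is connected, otherwise take the addresses from the (possibly newer) token. A hedged sketch of that decision; the non-empty check is an addition of this example, not in the diff:

```go
package main

import "fmt"

// chooseServerAddrs sketches the selection above: keep the known-good
// addresses while a stream is connected, otherwise fall back to the token.
func chooseServerAddrs(tokenAddrs, existingAddrs []string, streamConnected bool) []string {
	if streamConnected && len(existingAddrs) > 0 {
		return existingAddrs
	}
	return tokenAddrs
}

func main() {
	tok := []string{"10.0.0.1:8502"}
	existing := []string{"10.0.0.9:8502"}

	fmt.Println(chooseServerAddrs(tok, existing, true))  // [10.0.0.9:8502] keep what works
	fmt.Println(chooseServerAddrs(tok, existing, false)) // [10.0.0.1:8502] take the token's addresses
}
```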

@@ -954,6 +954,10 @@ func (e *ServiceResolverConfigEntry) Validate() error {

 		r := e.Redirect

+		if err := r.ValidateEnterprise(); err != nil {
+			return fmt.Errorf("Redirect: %s", err.Error())
+		}
+
 		if len(e.Failover) > 0 {
 			return fmt.Errorf("Redirect and Failover cannot both be set")
 		}

@@ -988,18 +992,59 @@ func (e *ServiceResolverConfigEntry) Validate() error {
 				return fmt.Errorf("Cross-datacenter failover is only supported in the default partition")
 			}

-			if subset != "*" && !isSubset(subset) {
-				return fmt.Errorf("Bad Failover[%q]: not a valid subset", subset)
+			errorPrefix := fmt.Sprintf("Bad Failover[%q]: ", subset)
+
+			if err := f.ValidateEnterprise(); err != nil {
+				return fmt.Errorf(errorPrefix + err.Error())
+			}
+
+			if subset != "*" && !isSubset(subset) {
+				return fmt.Errorf(errorPrefix + "not a valid subset")
 			}

-			if f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 {
-				return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, Namespace, or Datacenters is required", subset)
+			if f.isEmpty() {
+				return fmt.Errorf(errorPrefix + "one of Service, ServiceSubset, Namespace, Targets, or Datacenters is required")
 			}

 			if f.ServiceSubset != "" {
 				if f.Service == "" || f.Service == e.Name {
 					if !isSubset(f.ServiceSubset) {
-						return fmt.Errorf("Bad Failover[%q].ServiceSubset %q is not a valid subset of %q", subset, f.ServiceSubset, f.Service)
+						return fmt.Errorf("%sServiceSubset %q is not a valid subset of %q", errorPrefix, f.ServiceSubset, f.Service)
 					}
 				}
 			}

+			if len(f.Datacenters) != 0 && len(f.Targets) != 0 {
+				return fmt.Errorf("Bad Failover[%q]: Targets cannot be set with Datacenters", subset)
+			}
+
+			if f.ServiceSubset != "" && len(f.Targets) != 0 {
+				return fmt.Errorf("Bad Failover[%q]: Targets cannot be set with ServiceSubset", subset)
+			}
+
+			if f.Service != "" && len(f.Targets) != 0 {
+				return fmt.Errorf("Bad Failover[%q]: Targets cannot be set with Service", subset)
+			}
+
+			for i, target := range f.Targets {
+				errorPrefix := fmt.Sprintf("Bad Failover[%q].Targets[%d]: ", subset, i)
+
+				if err := target.ValidateEnterprise(); err != nil {
+					return fmt.Errorf(errorPrefix + err.Error())
+				}
+
+				switch {
+				case target.Peer != "" && target.ServiceSubset != "":
+					return fmt.Errorf(errorPrefix + "Peer cannot be set with ServiceSubset")
+				case target.Peer != "" && target.Partition != "":
+					return fmt.Errorf(errorPrefix + "Partition cannot be set with Peer")
+				case target.Peer != "" && target.Datacenter != "":
+					return fmt.Errorf(errorPrefix + "Peer cannot be set with Datacenter")
+				case target.Partition != "" && target.Datacenter != "":
+					return fmt.Errorf(errorPrefix + "Partition cannot be set with Datacenter")
+				case target.ServiceSubset != "" && (target.Service == "" || target.Service == e.Name):
+					if !isSubset(target.ServiceSubset) {
+						return fmt.Errorf("%sServiceSubset %q is not a valid subset of %q", errorPrefix, target.ServiceSubset, e.Name)
+					}
+				}
+			}

@@ -1107,9 +1152,24 @@ func (e *ServiceResolverConfigEntry) ListRelatedServices() []ServiceID {

 	if len(e.Failover) > 0 {
 		for _, failover := range e.Failover {
-			failoverID := NewServiceID(defaultIfEmpty(failover.Service, e.Name), failover.GetEnterpriseMeta(&e.EnterpriseMeta))
-			if failoverID != svcID {
-				found[failoverID] = struct{}{}
+			if len(failover.Targets) == 0 {
+				failoverID := NewServiceID(defaultIfEmpty(failover.Service, e.Name), failover.GetEnterpriseMeta(&e.EnterpriseMeta))
+				if failoverID != svcID {
+					found[failoverID] = struct{}{}
+				}
+				continue
+			}
+
+			for _, target := range failover.Targets {
+				// We can't know about related services on cluster peers.
+				if target.Peer != "" {
+					continue
+				}
+
+				failoverID := NewServiceID(defaultIfEmpty(target.Service, e.Name), target.GetEnterpriseMeta(failover.GetEnterpriseMeta(&e.EnterpriseMeta)))
+				if failoverID != svcID {
+					found[failoverID] = struct{}{}
+				}
 			}
 		}
 	}

@@ -1173,10 +1233,21 @@ type ServiceResolverRedirect struct {
 	Datacenter string `json:",omitempty"`
 }

+func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       r.Service,
+		ServiceSubset: r.ServiceSubset,
+		Namespace:     r.Namespace,
+		Partition:     r.Partition,
+		Datacenter:    r.Datacenter,
+	}
+}
+
 // There are some restrictions on what is allowed in here:
 //
-// - Service, ServiceSubset, Namespace, and Datacenters cannot all be
-//   empty at once.
+// - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be
+//   empty at once. When Targets is defined, the other fields should not be
+//   populated.
 //
 type ServiceResolverFailover struct {
 	// Service is the service to resolve instead of the default as the failover

@@ -1205,6 +1276,56 @@ type ServiceResolverFailover struct {
 	//
 	// This is a DESTINATION during failover.
 	Datacenters []string `json:",omitempty"`
+
+	// Targets specifies a fixed list of failover targets to try. We never try a
+	// target multiple times, so those are subtracted from this list before
+	// proceeding.
+	//
+	// This is a DESTINATION during failover.
+	Targets []ServiceResolverFailoverTarget `json:",omitempty"`
+}
+
+func (t *ServiceResolverFailover) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       t.Service,
+		ServiceSubset: t.ServiceSubset,
+		Namespace:     t.Namespace,
+	}
+}
+
+func (f *ServiceResolverFailover) isEmpty() bool {
+	return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 && len(f.Targets) == 0
+}
+
+type ServiceResolverFailoverTarget struct {
+	// Service specifies the name of the service to try during failover.
+	Service string `json:",omitempty"`
+
+	// ServiceSubset specifies the service subset to try during failover.
+	ServiceSubset string `json:",omitempty" alias:"service_subset"`
+
+	// Partition specifies the partition to try during failover.
+	Partition string `json:",omitempty"`
+
+	// Namespace specifies the namespace to try during failover.
+	Namespace string `json:",omitempty"`
+
+	// Datacenter specifies the datacenter to try during failover.
+	Datacenter string `json:",omitempty"`
+
+	// Peer specifies the name of the cluster peer to try during failover.
+	Peer string `json:",omitempty"`
+}
+
+func (t *ServiceResolverFailoverTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       t.Service,
+		ServiceSubset: t.ServiceSubset,
+		Namespace:     t.Namespace,
+		Partition:     t.Partition,
+		Datacenter:    t.Datacenter,
+		Peer:          t.Peer,
+	}
+}
+
 // LoadBalancer determines the load balancing policy and configuration for services
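
A compact illustration of the new mutual-exclusion rules for failover Targets, using trimmed-down stand-ins for the config-entry types (field sets abbreviated; only the rules shown in the Validate hunk above are reproduced):

```go
package main

import "fmt"

// failoverTarget abbreviates ServiceResolverFailoverTarget for the example.
type failoverTarget struct {
	Service, ServiceSubset, Partition, Namespace, Datacenter, Peer string
}

// failover abbreviates ServiceResolverFailover.
type failover struct {
	Service     string
	Datacenters []string
	Targets     []failoverTarget
}

// validate enforces the mutual-exclusion rules from Validate above: Targets
// replaces the legacy Service/Datacenters fields rather than combining with them.
func validate(f failover) error {
	if len(f.Targets) != 0 {
		switch {
		case len(f.Datacenters) != 0:
			return fmt.Errorf("Targets cannot be set with Datacenters")
		case f.Service != "":
			return fmt.Errorf("Targets cannot be set with Service")
		}
	}
	return nil
}

func main() {
	ok := failover{Targets: []failoverTarget{
		{Peer: "cluster-01"},
		{Datacenter: "dc2"},
		{Service: "backup"},
	}}
	bad := failover{Service: "backup", Targets: []failoverTarget{{Peer: "cluster-01"}}}

	fmt.Println(validate(ok))  // <nil>
	fmt.Println(validate(bad)) // Targets cannot be set with Service
}
```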

@@ -4,6 +4,8 @@
 package structs

 import (
+	"fmt"
+
 	"github.com/hashicorp/consul/acl"
 )

@@ -25,12 +27,56 @@ func (redir *ServiceResolverRedirect) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *
 	return DefaultEnterpriseMetaInDefaultPartition()
 }

+// ValidateEnterprise validates that enterprise fields are only set
+// with enterprise binaries.
+func (redir *ServiceResolverRedirect) ValidateEnterprise() error {
+	if redir.Partition != "" {
+		return fmt.Errorf("Setting Partition requires Consul Enterprise")
+	}
+
+	if redir.Namespace != "" {
+		return fmt.Errorf("Setting Namespace requires Consul Enterprise")
+	}
+
+	return nil
+}
+
 // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from
 // fields in the ServiceResolverFailover
 func (failover *ServiceResolverFailover) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta {
 	return DefaultEnterpriseMetaInDefaultPartition()
 }

+// ValidateEnterprise validates that enterprise fields are only set
+// with enterprise binaries.
+func (failover *ServiceResolverFailover) ValidateEnterprise() error {
+	if failover.Namespace != "" {
+		return fmt.Errorf("Setting Namespace requires Consul Enterprise")
+	}
+
+	return nil
+}
+
+// GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from
+// fields in the ServiceResolverFailoverTarget
+func (target *ServiceResolverFailoverTarget) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta {
+	return DefaultEnterpriseMetaInDefaultPartition()
+}
+
+// ValidateEnterprise validates that enterprise fields are only set
+// with enterprise binaries.
+func (redir *ServiceResolverFailoverTarget) ValidateEnterprise() error {
+	if redir.Partition != "" {
+		return fmt.Errorf("Setting Partition requires Consul Enterprise")
+	}
+
+	if redir.Namespace != "" {
+		return fmt.Errorf("Setting Namespace requires Consul Enterprise")
+	}
+
+	return nil
+}
+
 // GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from
 // fields in the DiscoveryChainRequest
 func (req *DiscoveryChainRequest) GetEnterpriseMeta() *acl.EnterpriseMeta {

@@ -0,0 +1,131 @@
+//go:build !consulent
+// +build !consulent
+
+package structs
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestServiceResolverConfigEntry_OSS(t *testing.T) {
+	type testcase struct {
+		name         string
+		entry        *ServiceResolverConfigEntry
+		normalizeErr string
+		validateErr  string
+		// check is called between normalize and validate
+		check func(t *testing.T, entry *ServiceResolverConfigEntry)
+	}
+
+	cases := []testcase{
+		{
+			name: "failover with a namespace on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Service:   "backup",
+						Namespace: "ns1",
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
+		},
+		{
+			name: "failover Targets cannot set Namespace on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Targets: []ServiceResolverFailoverTarget{{Namespace: "ns1"}},
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"].Targets[0]: Setting Namespace requires Consul Enterprise`,
+		},
+		{
+			name: "failover Targets cannot set Partition on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Targets: []ServiceResolverFailoverTarget{{Partition: "ap1"}},
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"].Targets[0]: Setting Partition requires Consul Enterprise`,
+		},
+		{
+			name: "setting failover Namespace on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {Namespace: "ns1"},
+				},
+			},
+			validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
+		},
+	}
+
+	// Bulk add a bunch of similar validation cases.
+	for _, invalidSubset := range invalidSubsetNames {
+		tc := testcase{
+			name: "invalid subset name: " + invalidSubset,
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Subsets: map[string]ServiceResolverSubset{
+					invalidSubset: {OnlyPassing: true},
+				},
+			},
+			validateErr: fmt.Sprintf("Subset %q is invalid", invalidSubset),
+		}
+		cases = append(cases, tc)
+	}
+
+	for _, goodSubset := range validSubsetNames {
+		tc := testcase{
+			name: "valid subset name: " + goodSubset,
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Subsets: map[string]ServiceResolverSubset{
+					goodSubset: {OnlyPassing: true},
+				},
+			},
+		}
+		cases = append(cases, tc)
+	}
+
+	for _, tc := range cases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			err := tc.entry.Normalize()
+			if tc.normalizeErr != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tc.normalizeErr)
+				return
+			}
+			require.NoError(t, err)
+
+			if tc.check != nil {
+				tc.check(t, tc.entry)
+			}
+
+			err = tc.entry.Validate()
+			if tc.validateErr != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tc.validateErr)
+				return
+			}
+			require.NoError(t, err)
+		})
+	}
+}

@@ -165,6 +165,34 @@ func TestConfigEntries_ListRelatedServices_AndACLs(t *testing.T) {
 			},
 		},
 	},
+	{
+		name: "resolver: failover with targets",
+		entry: &ServiceResolverConfigEntry{
+			Kind: ServiceResolver,
+			Name: "test",
+			Failover: map[string]ServiceResolverFailover{
+				"*": {
+					Targets: []ServiceResolverFailoverTarget{
+						{Service: "other1"},
+						{Datacenter: "dc2"},
+						{Peer: "cluster-01"},
+					},
+				},
+			},
+		},
+		expectServices: []ServiceID{NewServiceID("other1", nil)},
+		expectACLs: []testACL{
+			defaultDenyCase,
+			readTestCase,
+			writeTestCaseDenied,
+			{
+				name:       "can write test (with other1:read)",
+				authorizer: newServiceACL(t, []string{"other1"}, []string{"test"}),
+				canRead:    true,
+				canWrite:   true,
+			},
+		},
+	},
 	{
 		name: "splitter: self",
 		entry: &ServiceSplitterConfigEntry{

@@ -595,6 +623,15 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 			},
 			validateErr: "Redirect is empty",
 		},
+		{
+			name: "empty redirect",
+			entry: &ServiceResolverConfigEntry{
+				Kind:     ServiceResolver,
+				Name:     "test",
+				Redirect: &ServiceResolverRedirect{},
+			},
+			validateErr: "Redirect is empty",
+		},
 		{
 			name: "redirect subset with no service",
 			entry: &ServiceResolverConfigEntry{

@@ -606,17 +643,6 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 			},
 			validateErr: "Redirect.ServiceSubset defined without Redirect.Service",
 		},
-		{
-			name: "redirect namespace with no service",
-			entry: &ServiceResolverConfigEntry{
-				Kind: ServiceResolver,
-				Name: "test",
-				Redirect: &ServiceResolverRedirect{
-					Namespace: "alternate",
-				},
-			},
-			validateErr: "Redirect.Namespace defined without Redirect.Service",
-		},
 		{
 			name: "self redirect with invalid subset",
 			entry: &ServiceResolverConfigEntry{

@@ -695,7 +721,7 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 					"v1": {},
 				},
 			},
-			validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, Namespace, or Datacenters is required`,
+			validateErr: `Bad Failover["v1"]: one of Service, ServiceSubset, Namespace, Targets, or Datacenters is required`,
 		},
 		{
 			name: "failover to self using invalid subset",

@@ -712,7 +738,7 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 				},
 			},
-			validateErr: `Bad Failover["v1"].ServiceSubset "gone" is not a valid subset of "test"`,
+			validateErr: `Bad Failover["v1"]: ServiceSubset "gone" is not a valid subset of "test"`,
 		},
 		{
 			name: "failover to self using valid subset",

@@ -745,6 +771,109 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 			},
 			validateErr: `Bad Failover["*"].Datacenters: found empty datacenter`,
 		},
+		{
+			name: "failover target with an invalid subset",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Targets: []ServiceResolverFailoverTarget{{ServiceSubset: "subset"}},
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"].Targets[0]: ServiceSubset "subset" is not a valid subset of "test"`,
+		},
+		{
+			name: "failover targets can't have Peer and ServiceSubset",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01", ServiceSubset: "subset"}},
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"].Targets[0]: Peer cannot be set with ServiceSubset`,
+		},
+		{
+			name: "failover targets can't have Peer and Datacenter",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01", Datacenter: "dc1"}},
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"].Targets[0]: Peer cannot be set with Datacenter`,
+		},
+		{
+			name: "failover Targets cannot be set with Datacenters",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Datacenters: []string{"a"},
+						Targets:     []ServiceResolverFailoverTarget{{Peer: "cluster-01"}},
+					},
+				},
+			},
+			validateErr: `Bad Failover["*"]: Targets cannot be set with Datacenters`,
+		},
+		{
+			name: "failover Targets cannot be set with ServiceSubset",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						ServiceSubset: "v2",
+						Targets:       []ServiceResolverFailoverTarget{{Peer: "cluster-01"}},
+					},
+				},
+				Subsets: map[string]ServiceResolverSubset{
+					"v2": {Filter: "Service.Meta.version == v2"},
+				},
+			},
+			validateErr: `Bad Failover["*"]: Targets cannot be set with ServiceSubset`,
+		},
+		{
+			name: "failover Targets cannot be set with Service",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Service: "another-service",
+						Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01"}},
+					},
+				},
+				Subsets: map[string]ServiceResolverSubset{
+					"v2": {Filter: "Service.Meta.version == v2"},
+				},
+			},
+			validateErr: `Bad Failover["*"]: Targets cannot be set with Service`,
+		},
+		{
+			name: "complicated failover targets",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Failover: map[string]ServiceResolverFailover{
+					"*": {
+						Targets: []ServiceResolverFailoverTarget{
+							{Peer: "cluster-01", Service: "test-v2"},
+							{Service: "test-v2", ServiceSubset: "test"},
+							{Datacenter: "dc2"},
+						},
+					},
+				},
+			},
+		},
 		{
 			name: "bad connect timeout",
 			entry: &ServiceResolverConfigEntry{

@@ -216,6 +216,85 @@ func testConfigEntries_ListRelatedServices_AndACLs(t *testing.T, cases []configE
 	}
 }

+func TestDecodeConfigEntry_ServiceDefaults(t *testing.T) {
+
+	for _, tc := range []struct {
+		name      string
+		camel     string
+		snake     string
+		expect    ConfigEntry
+		expectErr string
+	}{
+		{
+			name: "service-defaults-with-MaxInboundConnections",
+			snake: `
+				kind = "service-defaults"
+				name = "external"
+				protocol = "tcp"
+				destination {
+					addresses = [
+						"api.google.com",
+						"web.google.com"
+					]
+					port = 8080
+				}
+				max_inbound_connections = 14
+			`,
+			camel: `
+				Kind = "service-defaults"
+				Name = "external"
+				Protocol = "tcp"
+				Destination {
+					Addresses = [
+						"api.google.com",
+						"web.google.com"
+					]
+					Port = 8080
+				}
+				MaxInboundConnections = 14
+			`,
+			expect: &ServiceConfigEntry{
+				Kind:     "service-defaults",
+				Name:     "external",
+				Protocol: "tcp",
+				Destination: &DestinationConfig{
+					Addresses: []string{
+						"api.google.com",
+						"web.google.com",
+					},
+					Port: 8080,
+				},
+				MaxInboundConnections: 14,
+			},
+		},
+	} {
+		tc := tc
+
+		testbody := func(t *testing.T, body string) {
+			var raw map[string]interface{}
+			err := hcl.Decode(&raw, body)
+			require.NoError(t, err)
+
+			got, err := DecodeConfigEntry(raw)
+			if tc.expectErr != "" {
+				require.Nil(t, got)
+				require.Error(t, err)
+				requireContainsLower(t, err.Error(), tc.expectErr)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.expect, got)
+			}
+		}
+
+		t.Run(tc.name+" (snake case)", func(t *testing.T) {
+			testbody(t, tc.snake)
+		})
+		t.Run(tc.name+" (camel case)", func(t *testing.T) {
+			testbody(t, tc.camel)
+		})
+	}
+}
+
 // TestDecodeConfigEntry is the 'structs' mirror image of
 // command/config/write/config_write_test.go:TestParseConfigEntry
 func TestDecodeConfigEntry(t *testing.T) {

@@ -56,7 +56,12 @@ type CompiledDiscoveryChain struct {
 // ID returns an ID that encodes the service, namespace, partition, and datacenter.
 // This ID allows us to compare a discovery chain target to the chain upstream itself.
 func (c *CompiledDiscoveryChain) ID() string {
-	return chainID("", c.ServiceName, c.Namespace, c.Partition, c.Datacenter)
+	return chainID(DiscoveryTargetOpts{
+		Service:    c.ServiceName,
+		Namespace:  c.Namespace,
+		Partition:  c.Partition,
+		Datacenter: c.Datacenter,
+	})
 }

 func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName {

@@ -185,6 +190,7 @@ type DiscoveryTarget struct {
 	Namespace  string `json:",omitempty"`
 	Partition  string `json:",omitempty"`
 	Datacenter string `json:",omitempty"`
+	Peer       string `json:",omitempty"`

 	MeshGateway MeshGatewayConfig     `json:",omitempty"`
 	Subset      ServiceResolverSubset `json:",omitempty"`

@@ -240,28 +246,52 @@ func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error {
 	return nil
 }

-func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget {
+type DiscoveryTargetOpts struct {
+	Service       string
+	ServiceSubset string
+	Namespace     string
+	Partition     string
+	Datacenter    string
+	Peer          string
+}
+
+func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget {
 	t := &DiscoveryTarget{
-		Service:       service,
-		ServiceSubset: serviceSubset,
-		Namespace:     namespace,
-		Partition:     partition,
-		Datacenter:    datacenter,
+		Service:       opts.Service,
+		ServiceSubset: opts.ServiceSubset,
+		Namespace:     opts.Namespace,
+		Partition:     opts.Partition,
+		Datacenter:    opts.Datacenter,
+		Peer:          opts.Peer,
 	}
 	t.setID()
 	return t
 }

+func (t *DiscoveryTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       t.Service,
+		ServiceSubset: t.ServiceSubset,
+		Namespace:     t.Namespace,
+		Partition:     t.Partition,
+		Datacenter:    t.Datacenter,
+		Peer:          t.Peer,
+	}
+}
+
-func chainID(subset, service, namespace, partition, dc string) string {
-	// NOTE: this format is similar to the SNI syntax for simplicity
-	if subset == "" {
-		return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, dc)
-	}
-	return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, dc)
+func chainID(opts DiscoveryTargetOpts) string {
+	// NOTE: this format is similar to the SNI syntax for simplicity
+	if opts.Peer != "" {
+		return fmt.Sprintf("%s.%s.default.external.%s", opts.Service, opts.Namespace, opts.Peer)
+	}
+	if opts.ServiceSubset == "" {
+		return fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
+	}
+	return fmt.Sprintf("%s.%s.%s.%s.%s", opts.ServiceSubset, opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
 }

 func (t *DiscoveryTarget) setID() {
-	t.ID = chainID(t.ServiceSubset, t.Service, t.Namespace, t.Partition, t.Datacenter)
+	t.ID = chainID(t.ToDiscoveryTargetOpts())
 }

 func (t *DiscoveryTarget) String() string {
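
The three ID shapes produced by chainID can be seen directly. A standalone reproduction of the format, using the same Sprintf patterns as the hunk above:

```go
package main

import "fmt"

// chainIDExample reproduces the three ID shapes from chainID above:
// peered targets, plain targets, and subset targets.
func chainIDExample(service, subset, namespace, partition, datacenter, peer string) string {
	if peer != "" {
		return fmt.Sprintf("%s.%s.default.external.%s", service, namespace, peer)
	}
	if subset == "" {
		return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, datacenter)
	}
	return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, datacenter)
}

func main() {
	fmt.Println(chainIDExample("web", "", "default", "default", "dc1", ""))   // web.default.default.dc1
	fmt.Println(chainIDExample("web", "v2", "default", "default", "dc1", "")) // v2.web.default.default.dc1
	fmt.Println(chainIDExample("web", "", "default", "", "", "cluster-01"))   // web.default.default.external.cluster-01
}
```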

@@ -353,7 +353,7 @@ func (q QueryOptions) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime tim
 		q.MaxQueryTime = defaultQueryTime
 	}
 	// Timeout after maximum jitter has elapsed.
-	q.MaxQueryTime += lib.RandomStagger(q.MaxQueryTime / JitterFraction)
+	q.MaxQueryTime += q.MaxQueryTime / JitterFraction

 	return q.MaxQueryTime + rpcHoldTimeout
 }
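
This change replaces a random stagger with the worst-case jitter, so the client always waits at least as long as a jittered server response could take, which is what prevents the i/o deadline errors called out in the release note. A worked example, assuming JitterFraction is 16 as in agent/structs at the time of this change:

```go
package main

import (
	"fmt"
	"time"
)

// jitterFraction matches structs.JitterFraction at the time of this change;
// treat the exact value as an assumption of this sketch.
const jitterFraction = 16

// clientTimeout mirrors the new deterministic calculation: query time plus the
// *maximum* jitter the server might add, plus the RPC hold timeout.
func clientTimeout(maxQueryTime, rpcHoldTimeout time.Duration) time.Duration {
	maxQueryTime += maxQueryTime / jitterFraction
	return maxQueryTime + rpcHoldTimeout
}

func main() {
	// With a 10m blocking query and a 7s hold timeout:
	// 10m + 10m/16 (= 37.5s) + 7s = 10m44.5s
	fmt.Println(clientTimeout(10*time.Minute, 7*time.Second))
}
```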

@@ -66,6 +66,10 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
 		if ctx.Err() != nil {
 			return
 		}
+		if m.isTerminalError(err) {
+			return
+		}

 		m.mat.handleError(req, err)

 		if err := m.mat.retryWaiter.Wait(ctx); err != nil {

@@ -74,6 +78,14 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
 	}
 }

+// isTerminalError determines whether the given error cannot be recovered from
+// and should cause the materializer to halt and be evicted from the view store.
+//
+// This roughly matches the logic in agent/proxycfg-glue.newUpdateEvent.
+func (m *LocalMaterializer) isTerminalError(err error) bool {
+	return acl.IsErrNotFound(err)
+}
+
 // subscribeOnce opens a new subscription to a local backend and runs
 // for its lifetime or until the view is closed.
 func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error {
@@ -47,6 +47,9 @@ type entry struct {
	// requests is the count of active requests using this entry. This entry will
	// remain in the store as long as this count remains > 0.
	requests int
	// evicting is used to mark an entry that will be evicted when the current in-
	// flight requests finish.
	evicting bool
}

// NewStore creates and returns a Store that is ready for use. The caller must
@@ -89,6 +92,7 @@ func (s *Store) Run(ctx context.Context) {

			// Only stop the materializer if there are no active requests.
			if e.requests == 0 {
				s.logger.Trace("evicting item from store", "key", he.Key())
				e.stop()
				delete(s.byKey, he.Key())
			}
@@ -187,13 +191,13 @@ func (s *Store) NotifyCallback(
				"error", err,
				"request-type", req.Type(),
				"index", index)
			continue
		}

		index = result.Index
		cb(ctx, cache.UpdateEvent{
			CorrelationID: correlationID,
			Result:        result.Value,
			Err:           err,
			Meta:          cache.ResultMeta{Index: result.Index, Hit: result.Cached},
		})
	}
@@ -211,6 +215,9 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
	defer s.lock.Unlock()
	e, ok := s.byKey[key]
	if ok {
		if e.evicting {
			return "", nil, errors.New("item is marked for eviction")
		}
		e.requests++
		s.byKey[key] = e
		return key, e.materializer, nil
@@ -222,7 +229,18 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
	}

	ctx, cancel := context.WithCancel(context.Background())
	go mat.Run(ctx)
	go func() {
		mat.Run(ctx)

		// Materializers run until they either reach their TTL and are evicted (which
		// cancels the given context) or encounter an irrecoverable error.
		//
		// If the context hasn't been canceled, we know it's the error case so we
		// trigger an immediate eviction.
		if ctx.Err() == nil {
			s.evictNow(key)
		}
	}()

	e = entry{
		materializer: mat,
@@ -233,6 +251,28 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
	return key, e.materializer, nil
}

// evictNow causes the item with the given key to be evicted immediately.
//
// If there are requests in-flight, the item is marked for eviction such that
// once the requests have been served releaseEntry will move it to the top of
// the expiry heap. If there are no requests in-flight, evictNow will move the
// item to the top of the expiry heap itself.
//
// In either case, the entry's evicting flag prevents it from being served by
// readEntry (and thereby gaining new in-flight requests).
func (s *Store) evictNow(key string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	e := s.byKey[key]
	e.evicting = true
	s.byKey[key] = e

	if e.requests == 0 {
		s.expireNowLocked(key)
	}
}

// releaseEntry decrements the request count and starts an expiry timer if the
// count has reached 0. Must be called once for every call to readEntry.
func (s *Store) releaseEntry(key string) {
@@ -246,6 +286,11 @@ func (s *Store) releaseEntry(key string) {
		return
	}

	if e.evicting {
		s.expireNowLocked(key)
		return
	}

	if e.expiry.Index() == ttlcache.NotIndexed {
		e.expiry = s.expiryHeap.Add(key, s.idleTTL)
		s.byKey[key] = e
@@ -255,6 +300,17 @@ func (s *Store) releaseEntry(key string) {
	s.expiryHeap.Update(e.expiry.Index(), s.idleTTL)
}

// expireNowLocked moves the item with the given key to the top of the expiry
// heap, causing it to be picked up by the expiry loop and evicted immediately.
func (s *Store) expireNowLocked(key string) {
	e := s.byKey[key]
	if idx := e.expiry.Index(); idx != ttlcache.NotIndexed {
		s.expiryHeap.Remove(idx)
	}
	e.expiry = s.expiryHeap.Add(key, time.Duration(0))
	s.byKey[key] = e
}

// makeEntryKey matches agent/cache.makeEntryKey, but may change in the future.
func makeEntryKey(typ string, r cache.RequestInfo) string {
	return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key)
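The requests/evicting bookkeeping is the heart of this change. The standalone toy below (illustrative types, not the real `Store`) demonstrates the contract: an entry marked for eviction refuses new readers and is removed once the last in-flight reader releases it.

```go
package main

import (
    "errors"
    "fmt"
    "sync"
)

type entry struct {
    requests int  // in-flight readers
    evicting bool // marked for eviction; read must refuse new readers
}

type store struct {
    mu    sync.Mutex
    byKey map[string]*entry
}

func (s *store) read(key string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    e, ok := s.byKey[key]
    if !ok {
        e = &entry{}
        s.byKey[key] = e
    }
    if e.evicting {
        return errors.New("item is marked for eviction")
    }
    e.requests++
    return nil
}

func (s *store) release(key string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    e := s.byKey[key]
    e.requests--
    if e.requests == 0 && e.evicting {
        delete(s.byKey, key) // stand-in for expireNowLocked
    }
}

// evictNow marks the entry; it is removed immediately only when idle.
func (s *store) evictNow(key string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    e, ok := s.byKey[key]
    if !ok {
        return
    }
    e.evicting = true
    if e.requests == 0 {
        delete(s.byKey, key)
    }
}

func main() {
    s := &store{byKey: map[string]*entry{}}
    _ = s.read("a")          // one in-flight reader
    s.evictNow("a")          // marked, but kept alive for the reader
    fmt.Println(s.read("a")) // item is marked for eviction
    s.release("a")           // last reader done: entry is evicted
    _, ok := s.byKey["a"]
    fmt.Println(ok) // false
}
```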
@@ -509,3 +509,75 @@ func TestStore_Run_ExpiresEntries(t *testing.T) {
	require.Len(t, store.byKey, 0)
	require.Equal(t, ttlcache.NotIndexed, e.expiry.Index())
}

func TestStore_Run_FailingMaterializer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	store := NewStore(hclog.NewNullLogger())
	store.idleTTL = 24 * time.Hour
	go store.Run(ctx)

	t.Run("with an in-flight request", func(t *testing.T) {
		req := &failingMaterializerRequest{
			doneCh: make(chan struct{}),
		}

		ch := make(chan cache.UpdateEvent)
		reqCtx, reqCancel := context.WithCancel(context.Background())
		t.Cleanup(reqCancel)
		require.NoError(t, store.Notify(reqCtx, req, "", ch))

		assertRequestCount(t, store, req, 1)

		// Cause the materializer to "fail" (exit before its context is canceled).
		close(req.doneCh)

		// End the in-flight request.
		reqCancel()

		// Check that the item was evicted.
		retry.Run(t, func(r *retry.R) {
			store.lock.Lock()
			defer store.lock.Unlock()

			require.Len(r, store.byKey, 0)
		})
	})

	t.Run("with no in-flight requests", func(t *testing.T) {
		req := &failingMaterializerRequest{
			doneCh: make(chan struct{}),
		}

		// Cause the materializer to "fail" (exit before its context is canceled).
		close(req.doneCh)

		// Check that the item was evicted.
		retry.Run(t, func(r *retry.R) {
			store.lock.Lock()
			defer store.lock.Unlock()

			require.Len(r, store.byKey, 0)
		})
	})
}

type failingMaterializerRequest struct {
	doneCh chan struct{}
}

func (failingMaterializerRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{} }
func (failingMaterializerRequest) Type() string                 { return "test.FailingMaterializerRequest" }

func (r *failingMaterializerRequest) NewMaterializer() (Materializer, error) {
	return &failingMaterializer{doneCh: r.doneCh}, nil
}

type failingMaterializer struct {
	doneCh <-chan struct{}
}

func (failingMaterializer) Query(context.Context, uint64) (Result, error) { return Result{}, nil }

func (m *failingMaterializer) Run(context.Context) { <-m.doneCh }
@@ -81,6 +81,11 @@ const (
)

func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discovery_v3.DeltaDiscoveryRequest) error {
	// Handle invalid ACL tokens up-front.
	if _, err := s.authenticate(stream.Context()); err != nil {
		return err
	}

	// Loop state
	var (
		cfgSnap *proxycfg.ConfigSnapshot
@@ -200,7 +205,18 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
			}
		}

	case cfgSnap = <-stateCh:
	case cs, ok := <-stateCh:
		if !ok {
			// stateCh is closed either when *we* cancel the watch (on-exit via defer)
			// or by the proxycfg.Manager when an irrecoverable error is encountered
			// such as the ACL token getting deleted.
			//
			// We know for sure that this is the latter case, because in the former we
			// would've already exited this loop.
			return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again")
		}
		cfgSnap = cs

		newRes, err := generator.allResourcesFromSnapshot(cfgSnap)
		if err != nil {
			return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err)
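The closed-channel reasoning in that comment generalizes. A small sketch (hypothetical names, not the xDS server's code) of telling our own cancellation apart from a producer-side termination:

```go
package main

import (
    "context"
    "errors"
    "fmt"
)

// recv distinguishes the two ways a watch ends: our own cancellation (ctx
// done) versus the producer closing the channel on an irrecoverable error.
func recv(ctx context.Context, stateCh <-chan string) (string, error) {
    select {
    case <-ctx.Done():
        return "", ctx.Err()
    case cs, ok := <-stateCh:
        if !ok {
            // We would have taken the ctx.Done() branch on our own
            // cancellation, so a closed channel means the producer hit a
            // terminal error.
            return "", errors.New("stream terminated due to an irrecoverable error")
        }
        return cs, nil
    }
}

func main() {
    ch := make(chan string, 1)
    ch <- "snapshot-1"
    v, err := recv(context.Background(), ch)
    fmt.Println(v, err) // snapshot-1 <nil>

    close(ch)
    _, err = recv(context.Background(), ch)
    fmt.Println(err) // stream terminated due to an irrecoverable error
}
```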
@@ -15,15 +15,40 @@ func TestFirstHealthyTarget(t *testing.T) {
	warning := proxycfg.TestUpstreamNodesInStatus(t, "warning")
	critical := proxycfg.TestUpstreamNodesInStatus(t, "critical")

	warnOnlyPassingTarget := structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1")
	warnOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
		Service:    "all-warn",
		Namespace:  "default",
		Partition:  "default",
		Datacenter: "dc1",
	})
	warnOnlyPassingTarget.Subset.OnlyPassing = true
	failOnlyPassingTarget := structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1")
	failOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
		Service:    "all-fail",
		Namespace:  "default",
		Partition:  "default",
		Datacenter: "dc1",
	})
	failOnlyPassingTarget.Subset.OnlyPassing = true

	targets := map[string]*structs.DiscoveryTarget{
		"all-ok.default.dc1":           structs.NewDiscoveryTarget("all-ok", "", "default", "default", "dc1"),
		"all-warn.default.dc1":         structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1"),
		"all-fail.default.default.dc1": structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1"),
		"all-ok.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
			Service:    "all-ok",
			Namespace:  "default",
			Partition:  "default",
			Datacenter: "dc1",
		}),
		"all-warn.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
			Service:    "all-warn",
			Namespace:  "default",
			Partition:  "default",
			Datacenter: "dc1",
		}),
		"all-fail.default.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
			Service:    "all-fail",
			Namespace:  "default",
			Partition:  "default",
			Datacenter: "dc1",
		}),
		"all-warn-onlypassing.default.dc1": warnOnlyPassingTarget,
		"all-fail-onlypassing.default.dc1": failOnlyPassingTarget,
	}
@@ -1214,16 +1214,38 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
			filterOpts.forwardClientPolicy = envoy_http_v3.HttpConnectionManager_APPEND_FORWARD
		}
	}

	// If an inbound connect limit is set, inject a connection limit filter on each chain.
	if cfg.MaxInboundConnections > 0 {
		connectionLimitFilter, err := makeConnectionLimitFilter(cfg.MaxInboundConnections)
		if err != nil {
			return nil, err
		}
		l.FilterChains = []*envoy_listener_v3.FilterChain{
			{
				Filters: []*envoy_listener_v3.Filter{
					connectionLimitFilter,
				},
			},
		}
	}

	filter, err := makeListenerFilter(filterOpts)
	if err != nil {
		return nil, err
	}
	l.FilterChains = []*envoy_listener_v3.FilterChain{
		{
			Filters: []*envoy_listener_v3.Filter{
				filter,

	if len(l.FilterChains) > 0 {
		// The list of FilterChains has already been initialized
		l.FilterChains[0].Filters = append(l.FilterChains[0].Filters, filter)
	} else {
		l.FilterChains = []*envoy_listener_v3.FilterChain{
			{
				Filters: []*envoy_listener_v3.Filter{
					filter,
				},
			},
		},
	}
	}

	err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter)
@@ -1249,17 +1271,6 @@ func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v
		return nil
	}

	// If an inbound connect limit is set, inject a connection limit filter on each chain.
	if proxyCfg.MaxInboundConnections > 0 {
		filter, err := makeConnectionLimitFilter(proxyCfg.MaxInboundConnections)
		if err != nil {
			return nil
		}
		for idx := range l.FilterChains {
			l.FilterChains[idx].Filters = append(l.FilterChains[idx].Filters, filter)
		}
	}

	return nil
}

@@ -1990,6 +2001,7 @@ func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener

func makeConnectionLimitFilter(limit int) (*envoy_listener_v3.Filter, error) {
	cfg := &envoy_connection_limit_v3.ConnectionLimit{
		StatPrefix:     "inbound_connection_limit",
		MaxConnections: wrapperspb.UInt64(uint64(limit)),
	}
	return makeFilter("envoy.filters.network.connection_limit", cfg)
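The ordering subtlety here (the connection-limit filter may have already created the first chain) reduces to a few lines. A toy model, not Envoy's types:

```go
package main

import "fmt"

type filter string
type filterChain struct{ filters []filter }

// addFilter appends to an existing first chain instead of overwriting the
// slice, mirroring the len(l.FilterChains) > 0 branch above.
func addFilter(chains []filterChain, f filter) []filterChain {
    if len(chains) > 0 {
        chains[0].filters = append(chains[0].filters, f)
        return chains
    }
    return []filterChain{{filters: []filter{f}}}
}

func main() {
    var chains []filterChain
    chains = addFilter(chains, "connection_limit") // creates the chain
    chains = addFilter(chains, "tcp_proxy")        // appends, preserving the limit
    fmt.Println(chains) // [{[connection_limit tcp_proxy]}]
}
```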
@@ -186,6 +186,18 @@ func (s *Server) Register(srv *grpc.Server) {
	envoy_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv, s)
}

func (s *Server) authenticate(ctx context.Context) (acl.Authorizer, error) {
	authz, err := s.ResolveToken(external.TokenFromContext(ctx))
	if acl.IsErrNotFound(err) {
		return nil, status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
	} else if acl.IsErrPermissionDenied(err) {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	} else if err != nil {
		return nil, status.Errorf(codes.Internal, "error resolving acl token: %v", err)
	}
	return authz, nil
}

// authorize the xDS request using the token stored in ctx. This authorization is
// a bit different from most interfaces. Instead of explicitly authorizing or
// filtering each piece of data in the response, the request is authorized
@@ -201,13 +213,9 @@ func (s *Server) authorize(ctx context.Context, cfgSnap *proxycfg.ConfigSnapshot
		return status.Errorf(codes.Unauthenticated, "unauthenticated: no config snapshot")
	}

	authz, err := s.ResolveToken(external.TokenFromContext(ctx))
	if acl.IsErrNotFound(err) {
		return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
	} else if acl.IsErrPermissionDenied(err) {
		return status.Error(codes.PermissionDenied, err.Error())
	} else if err != nil {
		return status.Errorf(codes.Internal, "error resolving acl token: %v", err)
	authz, err := s.authenticate(ctx)
	if err != nil {
		return err
	}

	var authzContext acl.AuthorizerContext
@@ -73,6 +73,14 @@
        "statPrefix": "connect_authz"
      }
    },
    {
      "name": "envoy.filters.network.connection_limit",
      "typedConfig": {
        "@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit",
        "statPrefix": "inbound_connection_limit",
        "maxConnections": "222"
      }
    },
    {
      "name": "envoy.filters.network.tcp_proxy",
      "typedConfig": {
@@ -80,13 +88,6 @@
        "statPrefix": "public_listener",
        "cluster": "local_app"
      }
    },
    {
      "name": "envoy.filters.network.connection_limit",
      "typedConfig": {
        "@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit",
        "maxConnections": "222"
      }
    }
  ],
  "transportSocket": {
@@ -218,21 +218,22 @@ type UpstreamLimits struct {
}

type ServiceConfigEntry struct {
	Kind             string
	Name             string
	Partition        string                  `json:",omitempty"`
	Namespace        string                  `json:",omitempty"`
	Protocol         string                  `json:",omitempty"`
	Mode             ProxyMode               `json:",omitempty"`
	TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"`
	MeshGateway      MeshGatewayConfig       `json:",omitempty" alias:"mesh_gateway"`
	Expose           ExposeConfig            `json:",omitempty"`
	ExternalSNI      string                  `json:",omitempty" alias:"external_sni"`
	UpstreamConfig   *UpstreamConfiguration  `json:",omitempty" alias:"upstream_config"`
	Destination      *DestinationConfig      `json:",omitempty"`
	Meta             map[string]string       `json:",omitempty"`
	CreateIndex      uint64
	ModifyIndex      uint64
	Kind                  string
	Name                  string
	Partition             string                  `json:",omitempty"`
	Namespace             string                  `json:",omitempty"`
	Protocol              string                  `json:",omitempty"`
	Mode                  ProxyMode               `json:",omitempty"`
	TransparentProxy      *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"`
	MeshGateway           MeshGatewayConfig       `json:",omitempty" alias:"mesh_gateway"`
	Expose                ExposeConfig            `json:",omitempty"`
	ExternalSNI           string                  `json:",omitempty" alias:"external_sni"`
	UpstreamConfig        *UpstreamConfiguration  `json:",omitempty" alias:"upstream_config"`
	Destination           *DestinationConfig      `json:",omitempty"`
	MaxInboundConnections int                     `json:",omitempty" alias:"max_inbound_connections"`
	Meta                  map[string]string       `json:",omitempty"`
	CreateIndex           uint64
	ModifyIndex           uint64
}

func (s *ServiceConfigEntry) GetKind() string { return s.Kind }
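For illustration, a client could set the new field through the api package roughly as follows (assumes a reachable local agent; the `web` service name is hypothetical and this snippet is not part of the PR):

```go
package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    entry := &api.ServiceConfigEntry{
        Kind:                  api.ServiceDefaults,
        Name:                  "web", // hypothetical service name
        Protocol:              "http",
        MaxInboundConnections: 222,
    }
    // Set writes the config entry; the agent's Envoy listeners then get the
    // connection-limit filter shown in the golden file above.
    ok, _, err := client.ConfigEntries().Set(entry, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("applied:", ok)
}
```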
@@ -225,8 +225,18 @@ type ServiceResolverFailover struct {
	Service       string `json:",omitempty"`
	ServiceSubset string `json:",omitempty" alias:"service_subset"`
	// Referencing other partitions is not supported.
	Namespace   string   `json:",omitempty"`
	Datacenters []string `json:",omitempty"`
	Namespace   string                          `json:",omitempty"`
	Datacenters []string                        `json:",omitempty"`
	Targets     []ServiceResolverFailoverTarget `json:",omitempty"`
}

type ServiceResolverFailoverTarget struct {
	Service       string `json:",omitempty"`
	ServiceSubset string `json:",omitempty" alias:"service_subset"`
	Partition     string `json:",omitempty"`
	Namespace     string `json:",omitempty"`
	Datacenter    string `json:",omitempty"`
	Peer          string `json:",omitempty"`
}

// LoadBalancer determines the load balancing policy and configuration for services
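A resolver using the new `Targets` list might be constructed like this (a hedged sketch mirroring the test below; the `web` name is hypothetical):

```go
package main

import (
    "fmt"

    "github.com/hashicorp/consul/api"
)

func main() {
    resolver := &api.ServiceResolverConfigEntry{
        Kind: api.ServiceResolver,
        Name: "web", // hypothetical
        Failover: map[string]api.ServiceResolverFailover{
            "v3": {
                // Targets are tried in order: a peered cluster, another
                // datacenter, then a different service subset.
                Targets: []api.ServiceResolverFailoverTarget{
                    {Peer: "cluster-01"},
                    {Datacenter: "dc1"},
                    {Service: "another-service", ServiceSubset: "v1"},
                },
            },
        },
    }
    fmt.Printf("%+v\n", resolver.Failover["v3"].Targets)
}
```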
@@ -149,6 +149,9 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
			"v2": {
				Filter: "Service.Meta.version == v2",
			},
			"v3": {
				Filter: "Service.Meta.version == v3",
			},
		},
		Failover: map[string]ServiceResolverFailover{
			"*": {
@@ -158,6 +161,13 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
				Service:   "alternate",
				Namespace: splitDefaultNamespace,
			},
			"v3": {
				Targets: []ServiceResolverFailoverTarget{
					{Peer: "cluster-01"},
					{Datacenter: "dc1"},
					{Service: "another-service", ServiceSubset: "v1"},
				},
			},
		},
		ConnectTimeout: 5 * time.Second,
		Meta: map[string]string{
@@ -104,6 +104,7 @@ func TestAPI_ConfigEntries(t *testing.T) {
			"foo": "bar",
			"gir": "zim",
		},
		MaxInboundConnections: 5,
	}

	dest := &DestinationConfig{
@@ -144,6 +145,7 @@ func TestAPI_ConfigEntries(t *testing.T) {
		require.Equal(t, service.Protocol, readService.Protocol)
		require.Equal(t, service.Meta, readService.Meta)
		require.Equal(t, service.Meta, readService.GetMeta())
		require.Equal(t, service.MaxInboundConnections, readService.MaxInboundConnections)

		// update it
		service.Protocol = "tcp"
@@ -232,7 +232,7 @@ func LookupProxyIDForSidecar(client *api.Client, sidecarFor string) (string, err
	var proxyIDs []string
	for _, svc := range svcs {
		if svc.Kind == api.ServiceKindConnectProxy && svc.Proxy != nil &&
			strings.ToLower(svc.Proxy.DestinationServiceID) == sidecarFor {
			strings.EqualFold(svc.Proxy.DestinationServiceID, sidecarFor) {
			proxyIDs = append(proxyIDs, svc.ID)
		}
	}
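Why `strings.EqualFold` rather than comparing `ToLower` results: it folds case without allocating lowered copies, and it also matches pairs a naive lowering misses. A quick check:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    fmt.Println(strings.EqualFold("One-SideCar", "one-sidecar")) // true

    // EqualFold uses Unicode case folding, so it also matches pairs that
    // ToLower alone misses, e.g. the long s (U+017F):
    fmt.Println(strings.EqualFold("ſ", "s"))                  // true
    fmt.Println(strings.ToLower("ſ") == strings.ToLower("s")) // false
}
```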
@@ -110,6 +110,17 @@ func TestCommandConfigWatcher(t *testing.T) {
				require.Equal(t, 9999, cfg.PublicListener.BindPort)
			},
		},

		{
			Name: "-sidecar-for, one sidecar case-insensitive",
			Flags: []string{
				"-sidecar-for", "One-SideCar",
			},
			Test: func(t *testing.T, cfg *proxy.Config) {
				// Sanity check we got the right instance.
				require.Equal(t, 9999, cfg.PublicListener.BindPort)
			},
		},
	}

	for _, tc := range cases {
@@ -99,6 +99,32 @@ func (c *cmd) Run(args []string) int {
	}

	switch {
	case c.keys && c.recurse:
		pairs, _, err := client.KV().List(key, &api.QueryOptions{
			AllowStale: c.http.Stale(),
		})
		if err != nil {
			c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err))
			return 1
		}

		for i, pair := range pairs {
			if c.detailed {
				var b bytes.Buffer
				if err := prettyKVPair(&b, pair, false, true); err != nil {
					c.UI.Error(fmt.Sprintf("Error rendering KV key: %s", err))
					return 1
				}
				c.UI.Info(b.String())

				if i < len(pairs)-1 {
					c.UI.Info("")
				}
			} else {
				c.UI.Info(fmt.Sprintf("%s", pair.Key))
			}
		}
		return 0
	case c.keys:
		keys, _, err := client.KV().Keys(key, c.separator, &api.QueryOptions{
			AllowStale: c.http.Stale(),
@@ -125,7 +151,7 @@ func (c *cmd) Run(args []string) int {
		for i, pair := range pairs {
			if c.detailed {
				var b bytes.Buffer
				if err := prettyKVPair(&b, pair, c.base64encode); err != nil {
				if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil {
					c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err))
					return 1
				}
@@ -161,7 +187,7 @@ func (c *cmd) Run(args []string) int {

		if c.detailed {
			var b bytes.Buffer
			if err := prettyKVPair(&b, pair, c.base64encode); err != nil {
			if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil {
				c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err))
				return 1
			}
@@ -187,7 +213,7 @@ func (c *cmd) Help() string {
	return c.help
}

func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error {
func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool, keysOnly bool) error {
	tw := tabwriter.NewWriter(w, 0, 2, 6, ' ', 0)
	fmt.Fprintf(tw, "CreateIndex\t%d\n", pair.CreateIndex)
	fmt.Fprintf(tw, "Flags\t%d\n", pair.Flags)
@@ -205,9 +231,9 @@ func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error {
	if pair.Namespace != "" {
		fmt.Fprintf(tw, "Namespace\t%s\n", pair.Namespace)
	}
	if base64EncodeValue {
	if !keysOnly && base64EncodeValue {
		fmt.Fprintf(tw, "Value\t%s", base64.StdEncoding.EncodeToString(pair.Value))
	} else {
	} else if !keysOnly {
		fmt.Fprintf(tw, "Value\t%s", pair.Value)
	}
	return tw.Flush()
@@ -418,3 +418,102 @@ func TestKVGetCommand_DetailedBase64(t *testing.T) {
		t.Fatalf("bad %#v, value is not base64 encoded", output)
	}
}

func TestKVGetCommand_KeysRecurse(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := agent.NewTestAgent(t, ``)
	defer a.Shutdown()
	client := a.Client()

	ui := cli.NewMockUi()
	c := New(ui)
	keys := map[string]string{
		"foo/":   "",
		"foo/a":  "Hello World 2",
		"foo1/a": "Hello World 1",
	}
	for k, v := range keys {
		var pair *api.KVPair
		switch v {
		case "":
			pair = &api.KVPair{Key: k, Value: nil}
		default:
			pair = &api.KVPair{Key: k, Value: []byte(v)}
		}
		if _, err := client.KV().Put(pair, nil); err != nil {
			t.Fatalf("err: %#v", err)
		}
	}
	args := []string{
		"-http-addr=" + a.HTTPAddr(),
		"-recurse",
		"-keys",
		"foo",
	}

	code := c.Run(args)
	if code != 0 {
		t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
	}
	output := ui.OutputWriter.String()
	for key, value := range keys {
		if !strings.Contains(output, key) {
			t.Fatalf("bad %#v missing %q", output, key)
		}
		if strings.Contains(output, key+":"+value) {
			t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value)
		}
	}
}

func TestKVGetCommand_DetailedKeysRecurse(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := agent.NewTestAgent(t, ``)
	defer a.Shutdown()
	client := a.Client()

	ui := cli.NewMockUi()
	c := New(ui)
	keys := map[string]string{
		"foo/":   "",
		"foo/a":  "Hello World 2",
		"foo1/a": "Hello World 1",
	}
	for k, v := range keys {
		var pair *api.KVPair
		switch v {
		case "":
			pair = &api.KVPair{Key: k, Value: nil}
		default:
			pair = &api.KVPair{Key: k, Value: []byte(v)}
		}
		if _, err := client.KV().Put(pair, nil); err != nil {
			t.Fatalf("err: %#v", err)
		}
	}
	args := []string{
		"-http-addr=" + a.HTTPAddr(),
		"-recurse",
		"-keys",
		"-detailed",
		"foo",
	}

	code := c.Run(args)
	if code != 0 {
		t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
	}
	output := ui.OutputWriter.String()
	for key, value := range keys {
		if value != "" && strings.Contains(output, value) {
			t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value)
		}
	}
}
go.mod

@@ -53,7 +53,7 @@ require (
	github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086
	github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267
	github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493
	github.com/imdario/mergo v0.3.6
	github.com/imdario/mergo v0.3.13
	github.com/kr/text v0.2.0
	github.com/miekg/dns v1.1.41
	github.com/mitchellh/cli v1.1.0
@@ -183,7 +183,7 @@ require (
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/resty.v1 v1.12.0 // indirect
	gopkg.in/yaml.v2 v2.2.8 // indirect
	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
	gopkg.in/yaml.v3 v3.0.0 // indirect
	k8s.io/klog v1.0.0 // indirect
	k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect
	sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect
go.sum

@@ -396,8 +396,9 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
@@ -969,8 +970,9 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -401,12 +401,17 @@ type GetEnvoyBootstrapParamsResponse struct {
	unknownFields protoimpl.UnknownFields

	ServiceKind ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=hashicorp.consul.dataplane.ServiceKind" json:"service_kind,omitempty"`
	// The destination service name
	// service is used to identify the service (as the local cluster name and
	// in metric tags). If the service is a connect proxy it will be the name of
	// the proxy's destination service; for gateways it will be the gateway
	// service's name.
	Service    string           `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
	Namespace  string           `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"`
	Partition  string           `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"`
	Datacenter string           `protobuf:"bytes,5,opt,name=datacenter,proto3" json:"datacenter,omitempty"`
	Config     *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"`
	NodeId     string           `protobuf:"bytes,7,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
	NodeName   string           `protobuf:"bytes,8,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
}

func (x *GetEnvoyBootstrapParamsResponse) Reset() {
@@ -483,6 +488,20 @@ func (x *GetEnvoyBootstrapParamsResponse) GetConfig() *structpb.Struct {
	return nil
}

func (x *GetEnvoyBootstrapParamsResponse) GetNodeId() string {
	if x != nil {
		return x.NodeId
	}
	return ""
}

func (x *GetEnvoyBootstrapParamsResponse) GetNodeName() string {
	if x != nil {
		return x.NodeName
	}
	return ""
}

var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor

var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{
	// The two remaining hunks in this file (@@ -525,7 +544,7 @@ and
	// @@ -543,69 +562,73 @@) change only this generated file-descriptor byte
	// array, which protoc regenerates to encode the new node_id and
	// node_name fields.
}

var (
@@ -68,12 +68,17 @@ enum ServiceKind {

message GetEnvoyBootstrapParamsResponse {
  ServiceKind service_kind = 1;
  // The destination service name
  // service is used to identify the service (as the local cluster name and
  // in metric tags). If the service is a connect proxy it will be the name of
  // the proxy's destination service; for gateways it will be the gateway
  // service's name.
  string service = 2;
  string namespace = 3;
  string partition = 4;
  string datacenter = 5;
  google.protobuf.Struct config = 6;
  string node_id = 7;
  string node_name = 8;
}

service DataplaneService {
@@ -630,6 +630,14 @@ func ServiceResolverFailoverToStructs(s *ServiceResolverFailover, t *structs.Ser
	t.ServiceSubset = s.ServiceSubset
	t.Namespace = s.Namespace
	t.Datacenters = s.Datacenters
	{
		t.Targets = make([]structs.ServiceResolverFailoverTarget, len(s.Targets))
		for i := range s.Targets {
			if s.Targets[i] != nil {
				ServiceResolverFailoverTargetToStructs(s.Targets[i], &t.Targets[i])
			}
		}
	}
}
func ServiceResolverFailoverFromStructs(t *structs.ServiceResolverFailover, s *ServiceResolverFailover) {
	if s == nil {
@@ -639,6 +647,38 @@ func ServiceResolverFailoverFromStructs(t *structs.ServiceResolverFailover, s *S
	s.ServiceSubset = t.ServiceSubset
	s.Namespace = t.Namespace
	s.Datacenters = t.Datacenters
	{
		s.Targets = make([]*ServiceResolverFailoverTarget, len(t.Targets))
		for i := range t.Targets {
			{
				var x ServiceResolverFailoverTarget
				ServiceResolverFailoverTargetFromStructs(&t.Targets[i], &x)
				s.Targets[i] = &x
			}
		}
	}
}
func ServiceResolverFailoverTargetToStructs(s *ServiceResolverFailoverTarget, t *structs.ServiceResolverFailoverTarget) {
	if s == nil {
		return
	}
	t.Service = s.Service
	t.ServiceSubset = s.ServiceSubset
	t.Partition = s.Partition
	t.Namespace = s.Namespace
	t.Datacenter = s.Datacenter
	t.Peer = s.Peer
}
func ServiceResolverFailoverTargetFromStructs(t *structs.ServiceResolverFailoverTarget, s *ServiceResolverFailoverTarget) {
	if s == nil {
		return
	}
	s.Service = t.Service
	s.ServiceSubset = t.ServiceSubset
	s.Partition = t.Partition
	s.Namespace = t.Namespace
	s.Datacenter = t.Datacenter
	s.Peer = t.Peer
}
func ServiceResolverRedirectToStructs(s *ServiceResolverRedirect, t *structs.ServiceResolverRedirect) {
	if s == nil {
@@ -107,6 +107,16 @@ func (msg *ServiceResolverFailover) UnmarshalBinary(b []byte) error {
	return proto.Unmarshal(b, msg)
}

// MarshalBinary implements encoding.BinaryMarshaler
func (msg *ServiceResolverFailoverTarget) MarshalBinary() ([]byte, error) {
	return proto.Marshal(msg)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *ServiceResolverFailoverTarget) UnmarshalBinary(b []byte) error {
	return proto.Unmarshal(b, msg)
}

// MarshalBinary implements encoding.BinaryMarshaler
func (msg *LoadBalancer) MarshalBinary() ([]byte, error) {
	return proto.Marshal(msg)
(File diff suppressed because it is too large.)
@@ -134,6 +134,21 @@ message ServiceResolverFailover {
  string ServiceSubset = 2;
  string Namespace = 3;
  repeated string Datacenters = 4;
  repeated ServiceResolverFailoverTarget Targets = 5;
}

// mog annotation:
//
// target=github.com/hashicorp/consul/agent/structs.ServiceResolverFailoverTarget
// output=config_entry.gen.go
// name=Structs
message ServiceResolverFailoverTarget {
  string Service = 1;
  string ServiceSubset = 2;
  string Partition = 3;
  string Namespace = 4;
  string Datacenter = 5;
  string Peer = 6;
}

// mog annotation:
@@ -143,10 +143,10 @@ func PeeringStateFromAPI(t api.PeeringState) PeeringState {
}

func (p *Peering) IsActive() bool {
	if p != nil && p.State == PeeringState_TERMINATED {
	if p == nil || p.State == PeeringState_TERMINATED {
		return false
	}
	if p == nil || p.DeletedAt == nil {
	if p.DeletedAt == nil {
		return true
	}
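The fix reorders the nil check so it happens before any field access. A standalone model of the corrected truth table (illustrative types, not the pbpeering structs):

```go
package main

import "fmt"

type peering struct {
    terminated bool
    deletedAt  *struct{}
}

// isActive mirrors the corrected ordering: a nil receiver or terminated
// state is inactive, and only then is deletedAt consulted.
func (p *peering) isActive() bool {
    if p == nil || p.terminated {
        return false
    }
    return p.deletedAt == nil
}

func main() {
    var missing *peering
    fmt.Println(missing.isActive())                            // false, and no nil-pointer panic
    fmt.Println((&peering{}).isActive())                       // true
    fmt.Println((&peering{terminated: true}).isActive())       // false
    fmt.Println((&peering{deletedAt: &struct{}{}}).isActive()) // false
}
```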
@@ -102,6 +102,10 @@ type ProtocolConfig struct {
	//
	// Note: this setting only applies to the Internal RPC configuration.
	VerifyServerHostname bool

	// UseAutoCert is used to enable usage of auto_encrypt/auto_config generated
	// certificate & key material on the external gRPC listener.
	UseAutoCert bool
}

// Config configures the Configurator.
@@ -167,6 +171,10 @@ type protocolConfig struct {
	// combinedCAPool is a pool containing both manualCAPEMs and the certificates
	// received from auto-config/auto-encrypt.
	combinedCAPool *x509.CertPool

	// useAutoCert indicates whether we should use auto-encrypt/config data
	// for the TLS server/listener. NOTE: only applies to the external gRPC server.
	useAutoCert bool
}

// Configurator provides tls.Config and net.Dial wrappers to enable TLS for
@@ -323,6 +331,7 @@ func (c *Configurator) loadProtocolConfig(base Config, pc ProtocolConfig) (*prot
		manualCAPEMs:   pems,
		manualCAPool:   manualPool,
		combinedCAPool: combinedPool,
		useAutoCert:    pc.UseAutoCert,
	}, nil
}

@@ -620,16 +629,15 @@ func (c *Configurator) Cert() *tls.Certificate {
	return cert
}

// GRPCTLSConfigured returns whether there's a TLS certificate configured for
// gRPC (either manually or by auto-config/auto-encrypt). It is checked, along
// with the presence of an HTTPS port, to determine whether to enable TLS on
// incoming gRPC connections.
// GRPCServerUseTLS returns whether there's a TLS certificate configured for
// (external) gRPC (either manually or by auto-config/auto-encrypt), and use
// of TLS for gRPC has not been explicitly disabled at auto-encrypt.
//
// This function acquires a read lock because it reads from the config.
func (c *Configurator) GRPCTLSConfigured() bool {
func (c *Configurator) GRPCServerUseTLS() bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.grpc.cert != nil || c.autoTLS.cert != nil
	return c.grpc.cert != nil || (c.grpc.useAutoCert && c.autoTLS.cert != nil)
}

// VerifyIncomingRPC returns true if we should verify incoming connections to
@@ -1465,7 +1465,7 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) {
	})
}

func TestConfigurator_GRPCTLSConfigured(t *testing.T) {
func TestConfigurator_GRPCServerUseTLS(t *testing.T) {
	t.Run("certificate manually configured", func(t *testing.T) {
		c := makeConfigurator(t, Config{
			GRPC: ProtocolConfig{
@@ -1473,22 +1473,47 @@ func TestConfigurator_GRPCServerUseTLS(t *testing.T) {
				KeyFile:  "../test/hostname/Alice.key",
			},
		})
		require.True(t, c.GRPCTLSConfigured())
		require.True(t, c.GRPCServerUseTLS())
	})

	t.Run("AutoTLS", func(t *testing.T) {
	t.Run("no certificate", func(t *testing.T) {
		c := makeConfigurator(t, Config{})
		require.False(t, c.GRPCServerUseTLS())
	})

	t.Run("AutoTLS (default)", func(t *testing.T) {
		c := makeConfigurator(t, Config{})

		bobCert := loadFile(t, "../test/hostname/Bob.crt")
		bobKey := loadFile(t, "../test/hostname/Bob.key")
		require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))

		require.True(t, c.GRPCTLSConfigured())
		require.False(t, c.GRPCServerUseTLS())
	})

	t.Run("no certificate", func(t *testing.T) {
		c := makeConfigurator(t, Config{})
		require.False(t, c.GRPCTLSConfigured())
	t.Run("AutoTLS w/ UseAutoCert Disabled", func(t *testing.T) {
		c := makeConfigurator(t, Config{
			GRPC: ProtocolConfig{
				UseAutoCert: false,
			},
		})

		bobCert := loadFile(t, "../test/hostname/Bob.crt")
		bobKey := loadFile(t, "../test/hostname/Bob.key")
		require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))
		require.False(t, c.GRPCServerUseTLS())
	})

	t.Run("AutoTLS w/ UseAutoCert Enabled", func(t *testing.T) {
		c := makeConfigurator(t, Config{
			GRPC: ProtocolConfig{
				UseAutoCert: true,
			},
		})

		bobCert := loadFile(t, "../test/hostname/Bob.crt")
		bobKey := loadFile(t, "../test/hostname/Bob.key")
		require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))
		require.True(t, c.GRPCServerUseTLS())
	})
}
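The predicate these tests pin down can be modeled in a few lines (field names illustrative, not the Configurator's internals):

```go
package main

import "fmt"

type tlsState struct {
    grpcCert    bool // certificate configured manually for gRPC
    autoCert    bool // certificate obtained via auto_encrypt/auto_config
    useAutoCert bool // GRPC.UseAutoCert
}

// grpcServerUseTLS mirrors GRPCServerUseTLS: a manual cert always wins;
// auto-TLS material counts only when UseAutoCert is enabled.
func (s tlsState) grpcServerUseTLS() bool {
    return s.grpcCert || (s.useAutoCert && s.autoCert)
}

func main() {
    fmt.Println(tlsState{grpcCert: true}.grpcServerUseTLS())                    // true
    fmt.Println(tlsState{autoCert: true}.grpcServerUseTLS())                    // false (default)
    fmt.Println(tlsState{autoCert: true, useAutoCert: true}.grpcServerUseTLS()) // true
}
```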
@@ -12,6 +12,7 @@ node_modules
.pnp*
.sass-cache
.DS_Store
.tool-versions
connect.lock
coverage
coverage_*
@@ -95,7 +95,7 @@
}

%composite-row-detail .policy::before {
  @extend %with-file-fill-mask, %as-pseudo;
  @extend %with-file-text-mask, %as-pseudo;
  margin-right: 3px;
}
%composite-row-detail .role::before {

@@ -1,6 +1,11 @@
.consul-external-source {
  @extend %pill-200, %frame-gray-600, %p1;
}

.consul-external-source::before {
  --icon-size: icon-300;
}

.consul-external-source.kubernetes::before {
  @extend %with-logo-kubernetes-color-icon, %as-pseudo;
}
@@ -15,10 +20,10 @@
  @extend %with-logo-consul-color-icon, %as-pseudo;
}
.consul-external-source.vault::before {
  @extend %with-vault-100;
  @extend %with-vault-300;
}
.consul-external-source.aws::before {
  @extend %with-aws-100;
  @extend %with-aws-300;
}
.consul-external-source.leader::before {
  @extend %with-star-outline-mask, %as-pseudo;

@@ -3,4 +3,5 @@
}
.consul-kind::before {
  @extend %with-gateway-mask, %as-pseudo;
  --icon-size: icon-300;
}

@@ -18,6 +18,9 @@ span.policy-node-identity::before {
span.policy-service-identity::before {
  content: 'Service Identity: ';
}
%pill::before {
  --icon-size: icon-300;
}
%pill.leader::before {
  @extend %with-star-outline-mask, %as-pseudo;
}

@@ -330,7 +330,7 @@
// @import './file-minus/index.scss';
// @import './file-plus/index.scss';
// @import './file-source/index.scss';
// @import './file-text/index.scss';
@import './file-text/index.scss';
// @import './file-x/index.scss';
// @import './files/index.scss';
// @import './film/index.scss';
@@ -6,7 +6,10 @@ description: The /agent/check endpoints interact with checks on the local agent

# Check - Agent HTTP API

The `/agent/check` endpoints interact with checks on the local agent in Consul.
Consul's health check capabilities are described in the
[health checks overview](/docs/discovery/checks).
The `/agent/check` endpoints interact with health checks
managed by the local agent in Consul.
These should not be confused with checks in the catalog.

## List Checks

@@ -420,6 +423,10 @@ $ curl \
This endpoint is used with a TTL type check to set the status of the check to
`critical` and to reset the TTL clock.

If you want to manually mark a service as unhealthy,
use [maintenance mode](/api-docs/agent#enable-maintenance-mode)
instead of defining a TTL health check and using this endpoint.

| Method | Path                          | Produces           |
| ------ | ----------------------------- | ------------------ |
| `PUT`  | `/agent/check/fail/:check_id` | `application/json` |

@@ -458,6 +465,10 @@ $ curl \
This endpoint is used with a TTL type check to set the status of the check and
to reset the TTL clock.

If you want to manually mark a service as unhealthy,
use [maintenance mode](/api-docs/agent#enable-maintenance-mode)
instead of defining a TTL health check and using this endpoint.

| Method | Path                            | Produces           |
| ------ | ------------------------------- | ------------------ |
| `PUT`  | `/agent/check/update/:check_id` | `application/json` |
@@ -410,13 +410,64 @@ The corresponding CLI command is [`consul catalog services`](/commands/catalog/services)
- `dc` `(string: "")` - Specifies the datacenter to query. This will default to
  the datacenter of the agent being queried.

- `node-meta` `(string: "")` - Specifies a desired node metadata key/value pair
- `node-meta` `(string: "")` **Deprecated** - Use `filter` with the `NodeMeta` selector instead.
  This parameter will be removed in a future version of Consul.
  Specifies a desired node metadata key/value pair
  of the form `key:value`. This parameter can be specified multiple times, and
  filters the results to nodes with the specified key/value pairs.

- `ns` `(string: "")` <EnterpriseAlert inline /> - Specifies the namespace of the services you look up.
  You can also [specify the namespace through other methods](#methods-to-specify-namespace).

- `filter` `(string: "")` - Specifies the expression used to filter the
  query results prior to returning the data.

### Filtering

The filter is executed against each Service mapping within the catalog.
The following selectors and filter operations are supported:

| Selector | Supported Operations |
| ---------------------------------------------------- | -------------------------------------------------- |
| `Address` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `Datacenter` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ID` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `Node` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `NodeMeta.<any>` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `NodeMeta` | Is Empty, Is Not Empty, In, Not In |
| `ServiceAddress` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceConnect.Native` | Equal, Not Equal |
| `ServiceEnableTagOverride` | Equal, Not Equal |
| `ServiceID` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceKind` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceMeta.<any>` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceMeta` | Is Empty, Is Not Empty, In, Not In |
| `ServiceName` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServicePort` | Equal, Not Equal |
| `ServiceProxy.DestinationServiceID` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.DestinationServiceName` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.LocalServiceAddress` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.LocalServicePort` | Equal, Not Equal |
| `ServiceProxy.Mode` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.TransparentProxy.OutboundListenerPort` | Equal, Not Equal |
| `ServiceProxy.MeshGateway.Mode` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.Datacenter` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.DestinationName` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.DestinationNamespace` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.DestinationType` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.LocalBindAddress` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams.LocalBindPort` | Equal, Not Equal |
| `ServiceProxy.Upstreams.MeshGateway.Mode` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceProxy.Upstreams` | Is Empty, Is Not Empty |
| `ServiceTaggedAddresses.<any>.Address` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `ServiceTaggedAddresses.<any>.Port` | Equal, Not Equal |
| `ServiceTaggedAddresses` | Is Empty, Is Not Empty, In, Not In |
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
| `ServiceWeights.Passing` | Equal, Not Equal |
| `ServiceWeights.Warning` | Equal, Not Equal |
| `TaggedAddresses.<any>` | Equal, Not Equal, In, Not In, Matches, Not Matches |
| `TaggedAddresses` | Is Empty, Is Not Empty, In, Not In |

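As a sketch of how `filter` combines these selectors (the service name and node metadata values are illustrative):

```shell-session
$ curl -G \
    http://127.0.0.1:8500/v1/catalog/services \
    --data-urlencode 'filter=ServiceName == "web" and NodeMeta.env == "prod"'
```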
### Sample Request

```shell-session
@@ -14,6 +14,9 @@ optional health checking mechanisms. Additionally, some of the query results
from the health endpoints are filtered while the catalog endpoints provide the
raw entries.

To modify health check registration or information,
use the [`/agent/check`](/api-docs/agent/check) endpoints.

## List Checks for Node

This endpoint returns the checks specific to the node provided on the path.
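A minimal sketch of querying this endpoint (the node name `my-node` is illustrative):

```shell-session
$ curl http://127.0.0.1:8500/v1/health/node/my-node
```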
@@ -11,7 +11,7 @@ The `/query` endpoints create, update, destroy, and execute prepared queries.
Prepared queries allow you to register a complex service query and then execute
it later via its ID or name to get a set of healthy nodes that provide a given
service. This is particularly useful in combination with Consul's
[DNS Interface](/docs/discovery/dns) as it allows for much richer queries than
[DNS Interface](/docs/discovery/dns#prepared-query-lookups) as it allows for much richer queries than
would be possible given the limited entry points exposed by DNS.

Check the [Geo Failover tutorial](https://learn.hashicorp.com/tutorials/consul/automate-geo-failover) for details and
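For instance, a sketch of executing a prepared query through the DNS interface (the query name `banking-app` and the default DNS port are illustrative):

```shell-session
$ dig @127.0.0.1 -p 8600 banking-app.query.consul
```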
@@ -1998,7 +1998,7 @@ specially crafted certificate signed by the CA can be used to gain full access t
Certificate Authority from the [`ca_file`](#tls_defaults_ca_file) or
[`ca_path`](#tls_defaults_ca_path). By default, this is false, and Consul
will not make use of TLS for outgoing connections. This applies to clients
and servers as both will make outgoing connections. This setting *does not*
and servers as both will make outgoing connections. This setting does not
apply to the gRPC interface as Consul makes no outgoing connections on this
interface.
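A minimal sketch of enabling verified outgoing TLS in the `tls.defaults` stanza (the CA path is illustrative):

```hcl
tls {
  defaults {
    ca_file         = "/etc/consul.d/consul-agent-ca.pem" # illustrative path
    verify_outgoing = true
  }
}
```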
@@ -2019,6 +2019,8 @@ specially crafted certificate signed by the CA can be used to gain full access t

- `verify_incoming` - ((#tls_grpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming).

- `use_auto_cert` - (Defaults to `false`) Enables or disables TLS on gRPC servers. Set to `true` to allow `auto_encrypt` TLS settings to apply to gRPC listeners. We recommend disabling TLS on gRPC servers if you are using `auto_encrypt` for other TLS purposes, such as enabling HTTPS.

- `https` ((#tls_https)) Provides settings for the HTTPS interface. To enable
  the HTTPS interface you must define a port via [`ports.https`](#https_port).
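A sketch of the new option in the `tls.grpc` stanza, keeping gRPC listeners plaintext while `auto_encrypt` serves other TLS purposes:

```hcl
tls {
  grpc {
    use_auto_cert = false
  }
}
```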
@@ -2071,7 +2073,9 @@ specially crafted certificate signed by the CA can be used to gain full access t
set to true, Consul verifies the TLS certificate presented by the servers
match the hostname `server.<datacenter>.<domain>`. By default this is false,
and Consul does not verify the hostname of the certificate, only that it
is signed by a trusted CA. This setting *must* be enabled to prevent a
is signed by a trusted CA.

~> **Security Note:** `verify_server_hostname` *must* be set to true to prevent a
compromised client from gaining full read and write access to all cluster
data *including all ACL tokens and Connect CA root keys*.
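A sketch of the recommended setting, assuming the option sits in the `tls.internal_rpc` stanza as elsewhere on this page:

```hcl
tls {
  internal_rpc {
    verify_server_hostname = true
  }
}
```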
@@ -2082,7 +2086,7 @@ specially crafted certificate signed by the CA can be used to gain full access t
### Deprecated Options ((#tls_deprecated_options))

The following options were deprecated in Consul 1.12; use the
[`tls`](#tls) stanza instead.
[`tls`](#tls-1) stanza instead.

- `ca_file` See: [`tls.defaults.ca_file`](#tls_defaults_ca_file).
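A sketch of migrating one of these options into the `tls` stanza (the path is illustrative):

```hcl
# Deprecated top-level option (pre-1.12):
ca_file = "/etc/consul.d/consul-agent-ca.pem"

# Replacement:
tls {
  defaults {
    ca_file = "/etc/consul.d/consul-agent-ca.pem"
  }
}
```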
@@ -2,7 +2,7 @@
layout: docs
page_title: Consul API Gateway Gateway
description: >-
  This topic descrbes how to configure the Consul API Gateway Gateway object
  This topic describes how to configure the Consul API Gateway Gateway object
---

# Gateway
@@ -159,7 +159,7 @@ Specifies the `tls` configurations for the `Gateway`. The `tls` object is requir

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `certificateRefs` | <div style={{width:480}}>Specifies Kubernetes `name` and `namespace` objects that contain TLS certificates and private keys. <br/>The certificates establish a TLS handshake for requests that match the `hostname` of the associated `listener`. Each reference must be a Kubernetes Secret. If you are using a Secret in a namespace other than the `Gateway`'s, each reference must also have a corresponding [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy).</div> | Object or array | Required if `tls` is set |
| `certificateRefs` | <div style={{width:480}}>Specifies Kubernetes `name` and `namespace` objects that contain TLS certificates and private keys. <br/>The certificates establish a TLS handshake for requests that match the `hostname` of the associated `listener`. Each reference must be a Kubernetes Secret. If you are using a Secret in a namespace other than the `Gateway`'s, each reference must also have a corresponding [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant).</div> | Object or array | Required if `tls` is set |
| `mode` | Specifies the TLS Mode. Should always be set to `Terminate` for `HTTPRoutes`. | string | Required if `certificateRefs` is set |
| `options` | Specifies additional Consul API Gateway options. | Map of strings | Optional |
@@ -174,7 +174,7 @@ In the following example, `tls` settings are configured to use a secret named `c

tls:
  certificateRefs:
    name: consul-server-cert
    - name: consul-server-cert
      group: ""
      kind: Secret
  mode: Terminate
@@ -183,3 +183,49 @@ tls:

```

#### Example cross-namespace certificateRef

The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 24-27), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 31-35).

<CodeBlockConfig filename="gateway_with_referencegrant.yaml" lineNumbers highlight="2-4,16-18,24-27,31-35">

```yaml
apiVersion: gateway.networking.k8s.io/v1beta1
kind: Gateway
metadata:
  name: example-gateway
  namespace: gateway-namespace
spec:
  gatewayClassName: consul-api-gateway
  listeners:
  - protocol: HTTPS
    port: 443
    name: https
    allowedRoutes:
      namespaces:
        from: Same
    tls:
      certificateRefs:
      - name: cert
        namespace: secret-namespace
        group: ""
        kind: Secret
---

apiVersion: gateway.networking.k8s.io/v1alpha2
kind: ReferenceGrant
metadata:
  name: reference-grant
  namespace: secret-namespace
spec:
  from:
  - group: gateway.networking.k8s.io
    kind: Gateway
    namespace: gateway-namespace
  to:
  - group: ""
    kind: Secret
    name: cert
```

</CodeBlockConfig>
@@ -0,0 +1,31 @@
---
layout: docs
page_title: Consul API Gateway MeshService
description: >-
  This topic describes how to configure the Consul API Gateway MeshService object
---

# MeshService

This topic provides full details about the `MeshService` resource.

## Introduction

A `MeshService` is a resource in the Kubernetes cluster that enables Kubernetes configuration models, such as `HTTPRoute` and `TCPRoute`, to reference services that only exist in Consul. A `MeshService` represents a service in the Consul service mesh outside the Kubernetes cluster where Consul API Gateway is deployed. The service represented by the `MeshService` resource must be in the same Consul datacenter as the Kubernetes cluster.

## Configuration Model

The following outline shows how to format the configurations in the `MeshService` object. Click on a property name to view details about the configuration.

* [`name`](#name): string | required

## Specification

This topic provides details about the configuration parameters.

### name

Specifies the name of the service in the Consul service mesh to send traffic to.

* Type: string
* Required: required
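A minimal sketch of a `MeshService` resource, assuming the `api-gateway.consul.hashicorp.com/v1alpha1` API version listed in the technical specifications (the names are illustrative):

```yaml
apiVersion: api-gateway.consul.hashicorp.com/v1alpha1
kind: MeshService
metadata:
  name: echo-mesh-service
spec:
  # Name of the service registered in the Consul service mesh
  name: echo
```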
@@ -7,7 +7,9 @@ description: >-

# Route

Routes are independent configuration objects that are associated with specific listeners.
This topic describes how to create and configure `Route` resources. Routes are independent configuration objects that are associated with specific listeners.

## Create a `Route`

Declare a route with either `kind: HTTPRoute` or `kind: TCPRoute` and configure the route parameters in the `spec` block.
Refer to the Kubernetes Gateway API documentation for each object type for details:
@@ -36,12 +38,98 @@ The following example creates a route named `example-route` associated with a li

</CodeBlockConfig>

To create a route for a `backendRef` in a different namespace, you must also
create a [ReferencePolicy](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy).
## Configuration model

The following example creates a route named `example-route` in namespace `gateway-namespace`. This route has a `backendRef` in namespace `service-namespace`. Traffic is allowed because the `ReferencePolicy`, named `reference-policy` in namespace `service-namespace`, allows traffic from `HTTPRoutes` in `gateway-namespace` to `Services` in `service-namespace`.
The following outline shows how to format the configurations for the `Route` object. The top-level `spec` field is the root for all configurations. Click on a property name to view details about the configuration.

<CodeBlockConfig filename="route_with_referencepolicy.yaml">
* [`parentRefs`](#parentrefs): array of objects | optional
  * [`group`](#parentrefs): string | optional
  * [`kind`](#parentrefs): string | optional
  * [`name`](#parentrefs): string | required
  * [`namespace`](#parentrefs): string | optional
  * [`sectionName`](#parentrefs): string | optional
* [`rules`](#rules): list of objects | optional
  * [`backendRefs`](#rules-backendrefs): list of objects | optional
    * [`group`](#rules-backendrefs): string | optional
    * [`kind`](#rules-backendrefs): string | optional
    * [`name`](#rules-backendrefs): string | required
    * [`namespace`](#rules-backendrefs): string | optional
    * [`port`](#rules-backendrefs): integer | required
    * [`weight`](#rules-backendrefs): integer | optional
  * [`filters`](#rules-filters): list of objects | optional
    * [`type`](#rules-filters-type): string | required
    * [`requestHeaderModifier`](#rules-filters-requestheadermodifier): object | optional
      * [`set`](#rules-filters-requestheadermodifier): array of objects | optional
        * [`name`](#rules-filters-requestheadermodifier): string | required
        * [`value`](#rules-filters-requestheadermodifier): string | required
      * [`add`](#rules-filters-requestheadermodifier): array of objects | optional
        * [`name`](#rules-filters-requestheadermodifier): string | required
        * [`value`](#rules-filters-requestheadermodifier): string | required
      * [`remove`](#rules-filters-requestheadermodifier): array of strings | optional
    * [`urlRewrite`](#rules-filters-urlrewrite): object | optional
      * [`path`](#rules-filters-urlrewrite-path): object | required
        * [`replacePrefixMatch`](#rules-filters-urlrewrite-path): string | required
        * [`type`](#rules-filters-urlrewrite-path): string | required
  * [`matches`](#rules-matches): array of objects | optional
    * [`path`](#rules-matches-path): list of objects | optional
      * [`type`](#rules-matches-path): string | required
      * [`value`](#rules-matches-path): string | required
    * [`headers`](#rules-matches-headers): list of objects | optional
      * [`type`](#rules-matches-headers): string | required
      * [`name`](#rules-matches-headers): string | required
      * [`value`](#rules-matches-headers): string | required
    * [`queryParams`](#rules-matches-queryparams): list of objects | optional
      * [`type`](#rules-matches-queryparams): string | required
      * [`name`](#rules-matches-queryparams): string | required
      * [`value`](#rules-matches-queryparams): string | required
    * [`method`](#rules-matches-method): string | optional

## Specification

This topic provides details about the configuration parameters.

### parentRefs

This field contains the list of `Gateways` that the route should attach to. If not set, the route will not attach to a `Gateway`. The following table describes the objects you can configure in the `parentRefs` block:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `group` | Specifies the Kubernetes API group of the `Gateway` to attach to. You can specify the following values: <ul><li>`gateway.networking.k8s.io`</li></ul> Defaults to `gateway.networking.k8s.io`. | String | Optional |
| `kind` | Specifies the Kubernetes kind of the `Gateway` to attach to. You can specify the following values: <ul><li>`Gateway`</li></ul> Defaults to `Gateway`. | String | Optional |
| `name` | Specifies the name of the `Gateway` the route is attached to. | String | Required |
| `namespace` | Specifies the Kubernetes namespace containing the `Gateway` to attach to. If the `Gateway` is in a different Kubernetes namespace than the `Route`, then you must specify a value. Defaults to the `Route` namespace. | String | Optional |
| `sectionName` | Specifies the name of a specific listener on the `Gateway` to attach to. If not set, the `Route` attempts to attach to all listeners on the `Gateway`. | String | Optional |
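A sketch of attaching a route to a specific listener with `parentRefs` (all names are illustrative):

```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: HTTPRoute
metadata:
  name: example-route
spec:
  parentRefs:
  - name: example-gateway
    namespace: gateway-namespace
    sectionName: https
```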

### rules

The `rules` field contains a list of objects that define behaviors for network traffic that goes through the route. The rule configuration contains the following objects:

* [`backendRefs`](#rules-backendrefs): Specifies which backend services the `Route` references when processing traffic.
* [`filters`](#rules-filters): Specifies which operations Consul API Gateway performs when traffic goes through the `Route`.
* [`matches`](#rules-matches): Determines which requests Consul API Gateway processes.

Rules are optional.

### rules.backendRefs

This field specifies backend services that the `Route` references. The following table describes the parameters for `backendRefs`:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `group` | Specifies the Kubernetes API Group of the referenced backend. You can specify the following values: <ul><li>`""`: Specifies the core Kubernetes API group. This value must be used when `kind` is set to `Service`. This is the default value if unspecified.</li><li>`api-gateway.consul.hashicorp.com`: This value must be used when `kind` is set to `MeshService`.</li></ul> | String | Optional |
| `kind` | Specifies the Kubernetes Kind of the referenced backend. You can specify the following values: <ul><li>`Service` (default): Indicates that the `backendRef` references a Service in the Kubernetes cluster.</li><li>`MeshService`: Indicates that the `backendRef` references a service in the Consul mesh. Refer to the `MeshService` [documentation](/docs/api-gateway/configuration/meshservice) for additional information.</li></ul> | String | Optional |
| `name` | Specifies the name of the Kubernetes Service or Consul mesh service resource. | String | Required |
| `namespace` | Specifies the Kubernetes namespace containing the Kubernetes Service or Consul mesh service resource. You must specify a value if the Service or Consul mesh service is defined in a different namespace from the `Route`. Defaults to the namespace of the `Route`. <br/>To create a route for a `backendRef` in a different namespace, you must also create a [ReferenceGrant](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant). Refer to the [example route](#example-cross-namespace-backendref) configured to reference across namespaces. | String | Optional |
| `port` | Specifies the port number for accessing the Kubernetes or Consul service. | Integer | Required |
| `weight` | Specifies the proportion of requests sent to the backend. Computed as weight divided by the sum of all weights in this `backendRefs` list. Defaults to `1`. A value of `0` indicates that no requests should be sent to the backend. | Integer | Optional |

#### Example cross-namespace backendRef

The following example creates a route named `example-route` in namespace `gateway-namespace`. This route has a `backendRef` in namespace `service-namespace`. Traffic is allowed because the `ReferenceGrant`, named `reference-grant` in namespace `service-namespace`, allows traffic from `HTTPRoutes` in `gateway-namespace` to `Services` in `service-namespace`.

<CodeBlockConfig filename="route_with_referencegrant.yaml">

```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
@@ -61,9 +149,9 @@ The following example creates a route named `example-route` in namespace `gatewa
---

apiVersion: gateway.networking.k8s.io/v1alpha2
kind: ReferencePolicy
kind: ReferenceGrant
metadata:
  name: reference-policy
  name: reference-grant
  namespace: service-namespace
spec:
  from:
@@ -78,12 +166,101 @@ The following example creates a route named `example-route` in namespace `gatewa

</CodeBlockConfig>

## MeshService
### rules.filters

The `MeshService` configuration holds a reference to an externally-managed Consul service mesh service and can be used as a `backendRef` for a [`Route`](#route).
The `filters` block defines steps for processing requests. You can configure filters to modify the properties of matching incoming requests and enable Consul API Gateway features, such as rewriting path prefixes (refer to [Reroute HTTP requests](/docs/api-gateway/usage#reroute-http-requests) for additional information).

| Parameter | Description | Type | Default |
| --- | --- | --- | --- |
| `name` | Specifies the service name for a Consul service. It is assumed this service will exist in either the `consulDestinationNamespace` or mirrored Consul namespace from where this custom resource is defined, depending on the Helm configuration. |
* Type: Array of objects
* Required: Optional

Refer to the [Consul API Gateway repository](https://github.com/hashicorp/consul-api-gateway/blob/main/config/crd/bases/api-gateway.consul.hashicorp.com_meshservices.yaml) for the complete specification.
### rules.filters.type

Specifies the type of filter you want to apply to the route. The parameter is optional and takes a string value.

You can specify the following values:

* `RequestHeaderModifier`: The `RequestHeaderModifier` type modifies the HTTP headers on the incoming request. You must define the [`rules.filters.requestHeaderModifier`](#rules-filters-requestheadermodifier) configurations to use this filter type.

* `URLRewrite`: The `URLRewrite` type modifies the URL path on the incoming request. You must define the [`rules.filters.urlRewrite`](#rules-filters-urlrewrite) configurations to use this filter type.

### rules.filters.requestHeaderModifier

Defines operations to perform on matching request headers when `rules.filters.type` is configured to `RequestHeaderModifier`. This field contains the following configuration objects:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `set` | Configure this field to rewrite the HTTP request header. It specifies the name of an HTTP header to overwrite and the new value to set. Any existing values associated with the header name are overwritten. You can specify the following configurations: <ul><li>`name`: Required string that specifies the name of the HTTP header to set.</li><li>`value`: Required string that specifies the value of the HTTP header to set.</li></ul> | List of objects | Optional |
| `add` | Configure this field to append the request header with a new value. It specifies the name of an HTTP header to append and the value(s) to add. You can specify the following configurations: <ul><li>`name`: Required string that specifies the name of the HTTP header to append.</li><li>`value`: Required string that specifies the value of the HTTP header to add.</li></ul> | List of objects | Optional |
| `remove` | Configure this field to specify an array of header names to remove from the request header. | Array of strings | Optional |
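A sketch of a `RequestHeaderModifier` filter (header names and values are illustrative):

```yaml
rules:
- filters:
  - type: RequestHeaderModifier
    requestHeaderModifier:
      set:
      - name: x-forwarded-proto
        value: https
      add:
      - name: x-gateway
        value: consul-api-gateway
      remove:
      - x-debug
```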

### rules.filters.urlRewrite

Specifies rules for rewriting the URL of incoming requests when `rules.filters.type` is configured to `URLRewrite`.

* Type: Object
* Required: Optional

### rules.filters.urlRewrite.path

Specifies a list of objects that determine how Consul API Gateway rewrites URL paths (refer to [Reroute HTTP requests](/docs/api-gateway/usage#reroute-http-requests) for additional information).

The following table describes the parameters for `path`:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `replacePrefixMatch` | Specifies a value that replaces the path prefix for incoming HTTP requests. The operation only affects the path prefix. The rest of the path is unchanged. | String | Required |
| `type` | Specifies the type of replacement to use for the URL path. You can specify the following values: <ul><li>`ReplacePrefixMatch`: Replaces the matched prefix of the URL path (default).</li></ul> | String | Optional |
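A sketch of a `URLRewrite` filter that rewrites a matched prefix (the path value is illustrative):

```yaml
rules:
- filters:
  - type: URLRewrite
    urlRewrite:
      path:
        replacePrefixMatch: /api
        type: ReplacePrefixMatch
```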

### rules.matches

Specifies rules for matching incoming requests. You can apply [`filters`](#rules-filters) to requests that match the defined rules. You can match incoming requests based on the following elements:

* [paths](#rules-matches-path)
* [headers](#rules-matches-headers)
* [query parameters](#rules-matches-queryparams)
* [request method](#rules-matches-method)

Each rule matches requests independently. As a result, a request matching any of the conditions is considered a match. You can configure several matching rules for each type to widen or narrow matches. A combined sketch appears after the method list below.

### rules.matches.path

Specifies a list of objects that define matches based on URL path. The following table describes the parameters for the `path` field:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `type` | Specifies the type of comparison to use for matching the path value. You can specify the following types. <ul><li>`Exact`: Returns a match only when the entire path matches the `value` field (default).</li><li>`PathPrefix`: Returns a match when the path has the prefix defined in the `value` field.</li><li>`RegularExpression`: Returns a match when the path matches the regex defined in the `value` field.</li></ul> | String | Required |
| `value` | Specifies the value to match on. You can specify a specific string when `type` is `Exact` or `PathPrefix`. You can specify a regular expression if `type` is `RegularExpression`. | String | Required |

### rules.matches.headers

Specifies a list of objects that define matches based on HTTP request headers. The following table describes the parameters for the `headers` field:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `type` | Specifies the type of comparison to use for matching the header value. You can specify the following types. <ul><li>`Exact`: Returns a match only when the entire header matches the `value` field (default).</li><li>`RegularExpression`: Returns a match when the header matches the regex defined in the `value` field.</li></ul> | String | Required |
| `name` | Specifies the name of the header to match on. | String | Required |
| `value` | Specifies the value to match on. You can specify a specific string or a regular expression. | String | Required |

### rules.matches.queryParams

Specifies a list of objects that define matches based on query parameters. The following table describes the parameters for the `queryParams` field:

| Parameter | Description | Type | Required |
| --- | --- | --- | --- |
| `type` | Specifies the type of comparison to use for matching a query parameter value. You can specify the following types. <ul><li>`Exact`: Returns a match only when the query parameter matches the `value` field (default).</li><li>`RegularExpression`: Returns a match when the query parameter matches the regex defined in the `value` field.</li></ul> | String | Required |
| `name` | Specifies the name of the query parameter to match on. | String | Required |
| `value` | Specifies the value to match on. You can specify a specific string or a regular expression. | String | Required |

### rules.matches.method

Specifies a list of strings that define matches based on HTTP request method. You may specify the following values:

* `HEAD`
* `POST`
* `PUT`
* `PATCH`
* `GET`
* `DELETE`
* `OPTIONS`
* `TRACE`
* `CONNECT`
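A combined sketch of `matches`, following the upstream Gateway API schema in which each match's `path` is a single object (the path value is illustrative):

```yaml
rules:
- matches:
  - path:
      type: PathPrefix
      value: /api
    method: GET
```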
@@ -38,7 +38,7 @@ are used, see the [documentation in our GitHub repo](https://github.com/hashicor
| [`Gateway`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.Gateway) | <ul><li>Supported protocols: `HTTP`, `HTTPS`, `TCP`</li><li>Header-based hostname matching (no SNI support)</li><li>Supported filters: header addition, removal, and setting</li><li>TLS modes supported: `terminate`</li><li>Certificate types supported: `core/v1/Secret`</li><li>Extended options: TLS version and cipher constraints</li></ul> |
| [`HTTPRoute`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.HTTPRoute) | <ul><li>Weight-based load balancing</li><li>Supported rules: path, header, query, and method-based matching</li><li>Supported filters: header addition, removal, and setting</li><li>Supported backend types: <ol><li>`core/v1/Service` (must map to a registered Consul service)</li><li>`api-gateway.consul.hashicorp.com/v1alpha1/MeshService`</li></ol></li></ul> |
| [`TCPRoute`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute) | <ul><li>Supported backend types: <ol><li>`core/v1/Service` (must map to a registered Consul service)</li><li>`api-gateway.consul.hashicorp.com/v1alpha1/MeshService`</li></ol></li></ul> |
| [`ReferencePolicy`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferencePolicy) | <ul><li>Required to allow any reference from a `Gateway` to a Kubernetes `core/v1/Secret` in a different namespace.</li><ul><li>A Gateway with an unpermitted `certificateRefs` caused by the lack of a `ReferencePolicy` sets a `ResolvedRefs` status to `False` with the reason `InvalidCertificateRef`. The Gateway will not become ready in this case.</li></ul><li>Required to allow any reference from an `HTTPRoute` or `TCPRoute` to a Kubernetes `core/v1/Service` in a different namespace.</li><ul><li>A route with an unpermitted `backendRefs` caused by the lack of a `ReferencePolicy` sets a `ResolvedRefs` status to `False` with the reason `RefNotPermitted`. The gateway listener rejects routes with an unpermitted `backendRefs`.</li><li>WARNING: If a route `backendRefs` becomes unpermitted, the entire route is removed from the gateway listener. <ul><li>A `backendRefs` can become unpermitted when you delete a `ReferencePolicy` or add a new unpermitted `backendRefs` to an existing route.</li></ul></li></ul></ul> |
| [`ReferenceGrant`](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant) | <ul><li>Required to allow any reference from a `Gateway` to a Kubernetes `core/v1/Secret` in a different namespace.</li><ul><li>A Gateway with an unpermitted `certificateRefs` caused by the lack of a `ReferenceGrant` sets a `ResolvedRefs` status to `False` with the reason `InvalidCertificateRef`. The Gateway will not become ready in this case.</li></ul><li>Required to allow any reference from an `HTTPRoute` or `TCPRoute` to a Kubernetes `core/v1/Service` in a different namespace.</li><ul><li>A route with an unpermitted `backendRefs` caused by the lack of a `ReferenceGrant` sets a `ResolvedRefs` status to `False` with the reason `RefNotPermitted`. The gateway listener rejects routes with an unpermitted `backendRefs`.</li><li>WARNING: If a route `backendRefs` becomes unpermitted, the entire route is removed from the gateway listener. <ul><li>A `backendRefs` can become unpermitted when you delete a `ReferenceGrant` or add a new unpermitted `backendRefs` to an existing route.</li></ul></li></ul></ul> |

## Additional Resources