Merge branch 'main' into main

Commit 6bb252c2be by Nick Wales, 2022-09-16 12:43:54 -05:00 (committed by GitHub).
1576 changed files with 31643 additions and 15519 deletions.

.changelog/11742.txt

@ -0,0 +1,3 @@
```release-note:improvement
api: Add filtering support to Catalog's List Services (v1/catalog/services)
```
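A minimal Go sketch (not from the diff) of exercising the new `filter` query parameter with the standard library; the selector used in the expression is an illustrative assumption, not taken from this change:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical filter expression; consult the API docs for the
	// selectors actually supported on /v1/catalog/services.
	filter := url.QueryEscape(`ServiceName != "consul"`)
	resp, err := http.Get("http://127.0.0.1:8500/v1/catalog/services?filter=" + filter)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // map of service name -> tags, filtered server-side
}
```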

.changelog/12905.txt

@ -0,0 +1,3 @@
```release-note:improvement
metrics: Service RPC calls less than 1ms are now emitted as a decimal number.
```

.changelog/13493.txt

@ -0,0 +1,3 @@
```release-note:bug
cli: Fix the `consul kv get` command so that the `-keys` and `-recurse` flags can be set together
```

.changelog/13998.txt

@ -0,0 +1,3 @@
```release-note:improvement
connect: expose new tracing configuration on envoy
```

.changelog/14034.txt

@ -0,0 +1,3 @@
```release-note:bug
cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive.
```

.changelog/14119.txt

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed some spurious issues during peering establishment when a follower is dialed
```

.changelog/14161.txt

@ -0,0 +1,3 @@
```release-note:improvement
metrics: add labels of segment, partition, network area, network (lan or wan) to serf and memberlist metrics
```

.changelog/14162.txt

@ -0,0 +1,5 @@
```release-note:improvement
config-entry: Validate that service-resolver `Failover`s and `Redirect`s only
specify `Partition` and `Namespace` on Consul Enterprise. This prevents scenarios
where OSS Consul would save service-resolvers that require Consul Enterprise.
```

.changelog/14178.txt

@ -0,0 +1,4 @@
```release-note:breaking-change
xds: Convert service mesh failover to use Envoy's aggregate clusters. This
changes the names of some [Envoy dynamic HTTP metrics](https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats#dynamic-http-statistics).
```

.changelog/14233.txt

@ -0,0 +1,3 @@
```release-note:bug
rpc: Adds max jitter to client deadlines to prevent i/o deadline errors on blocking queries
```

.changelog/14238.txt

@ -0,0 +1,3 @@
```release-note:improvement
envoy: adds additional Envoy outlier ejection parameters to passive health check configurations.
```

.changelog/14269.txt

@ -0,0 +1,3 @@
```release-note:bug
connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections.
```

.changelog/14285.txt

@ -0,0 +1,3 @@
```release-note:feature
connect: Server address changes are streamed to peers
```

.changelog/14290.txt

@ -0,0 +1,3 @@
```release-note:bug
envoy: validate name before deleting proxy default configurations.
```

.changelog/14294.txt

@ -0,0 +1,6 @@
```release-note:breaking-change
config: Add new `ports.grpc_tls` configuration option.
Introduce a new port to better separate TLS config from the existing `ports.grpc` config.
The new `ports.grpc_tls` only supports TLS encrypted communication.
The existing `ports.grpc` currently supports both plain-text and TLS communication, but TLS support will be removed in a future release.
```

.changelog/14343.txt

@ -0,0 +1,4 @@
```release-note:feature
ui: Use withCredentials for all HTTP API requests
```

.changelog/14364.txt

@ -0,0 +1,3 @@
```release-note:bug
peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state.
```

.changelog/14373.txt

@ -0,0 +1,3 @@
```release-note:improvement
xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
```

.changelog/14378.txt

@ -0,0 +1,5 @@
```release-note:bug
api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
`QueryFailoverOptions` and marks it as deprecated.
```

.changelog/14395.txt

@ -0,0 +1,4 @@
```release-note:feature
service-defaults: Added support for `local_request_timeout_ms` and
`local_connect_timeout_ms` in the service-defaults config entry
```

.changelog/14396.txt

@ -0,0 +1,3 @@
```release-note:feature
peering: Add support for failing over to services running on cluster peers.
```

.changelog/14397.txt

@ -0,0 +1,3 @@
```release-note:feature
xds: servers will limit the number of concurrent xDS streams they can handle to balance the load across all servers
```

.changelog/14423.txt

@ -0,0 +1,3 @@
```release-note:feature
cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information.
```

.changelog/14429.txt

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed an issue where intermediate certificates could build up in the root CA because they were never being pruned after expiring.
```

.changelog/14433.txt

@ -0,0 +1,3 @@
```release-note:bug
checks: If set, use proxy address for automatically added sidecar check instead of service address.
```

.changelog/14445.txt

@ -0,0 +1,3 @@
```release-note:feature
peering: Add support for redirecting to services running on cluster peers with service resolvers.
```

.changelog/14474.txt

@ -0,0 +1,3 @@
```release-note:feature
http: Add new `get-or-empty` operation to the txn api. Refer to the [API docs](https://www.consul.io/api-docs/txn#kv-operations) for more information.
```
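A hedged Go sketch of the new verb; the payload shape mirrors the documented txn KV operations, and the key is a placeholder:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// "get-or-empty" returns the entry if present, or an empty result
	// instead of failing the transaction when the key is absent.
	payload := `[{"KV": {"Verb": "get-or-empty", "Key": "example/key"}}]`
	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:8500/v1/txn", strings.NewReader(payload))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```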

.changelog/14475.txt

@ -0,0 +1,3 @@
```release-note:bug
metrics: Add duplicate metrics that have only a single "consul_" prefix for all existing metrics with double ("consul_consul_") prefix, with the intent to standardize on single prefixes.
```

.changelog/14495.txt

@ -0,0 +1,3 @@
```release-note:feature
ui: Detect a TokenSecretID cookie and pass it through to localStorage
```

.changelog/14516.txt

@ -0,0 +1,3 @@
```release-note:bug
ca: Fixed a bug with the Vault CA provider where the intermediate PKI mount and leaf cert role were not being updated when the CA configuration was changed.
```

.changelog/14521.txt

@ -0,0 +1,3 @@
```release-note:improvement
ui: Reuse connections for requests to /v1/internal/ui/metrics-proxy/
```

.changelog/14563.txt

@ -0,0 +1,3 @@
```release-note:improvement
peering: Validate peering tokens for server name conflicts
```

.changelog/14573.txt

@ -0,0 +1,3 @@
```release-note:improvement
connect: Bump latest Envoy to 1.23.1 in test matrix
```

.changelog/14577.txt

@ -0,0 +1,3 @@
```release-note:security
auto-config: Added input validation for auto-config JWT authorization checks. Prior to this change, it was possible for malicious actors to construct requests which incorrectly pass custom JWT claim validation for the `AutoConfig.InitialConfiguration` endpoint. Now, only a subset of characters are allowed for the input before evaluating the bexpr.
```
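The diff for that change is not shown here; as a purely hypothetical illustration of the technique (restricting input characters before the value reaches the bexpr evaluator):

```go
package main

import (
	"fmt"
	"regexp"
)

// safeClaim is a hypothetical allow-list; the character set Consul actually
// permits is defined in the auto-config change itself, not shown in this diff.
var safeClaim = regexp.MustCompile(`^[A-Za-z0-9_./:-]*$`)

func validateClaimValue(v string) error {
	if !safeClaim.MatchString(v) {
		return fmt.Errorf("claim value contains disallowed characters")
	}
	return nil
}

func main() {
	fmt.Println(validateClaimValue("web-service"))     // <nil>
	fmt.Println(validateClaimValue(`x" or "a" == "a`)) // error: quotes rejected
}
```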

.changelog/14579.txt

@ -0,0 +1,3 @@
```release-note:security
connect: Added URI length checks to ConnectCA CSR requests. Prior to this change, it was possible for a malicious actor to designate multiple SAN URI values in a call to the `ConnectCA.Sign` endpoint. The endpoint now only allows for exactly one SAN URI to be specified.
```
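Again hypothetical, a sketch of the guard the note describes (exactly one URI SAN per CSR), not the actual Consul implementation:

```go
package validate

import (
	"crypto/x509"
	"fmt"
)

// requireSingleURISAN rejects CSRs that do not carry exactly one SAN URI
// (in Consul's case, the SPIFFE ID of the requester).
func requireSingleURISAN(csr *x509.CertificateRequest) error {
	if len(csr.URIs) != 1 {
		return fmt.Errorf("CSR must contain exactly one URI SAN, got %d", len(csr.URIs))
	}
	return nil
}
```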

.changelog/14598.txt

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed a bug where old root CAs would be removed from the primary datacenter after switching providers and restarting the cluster.
```

.changelog/14606.txt

@ -0,0 +1,3 @@
```release-note:bug
ui: Removed Overview page from HCP installations
```

.changelog/14619.txt

@ -0,0 +1,3 @@
```release-note:bug
checks: Do not set interval as timeout value
```

.changelog/_2271.txt

@ -0,0 +1,3 @@
```release-note:improvement
snapshot agent: **(Enterprise only)** Add support for path-based addressing when using the S3 backend.
```


@ -27,7 +27,11 @@ references:
- &default_envoy_version "1.20.6"
- "1.21.4"
- "1.22.2"
- "1.23.0"
- "1.23.1"
nomad-versions: &supported_nomad_versions
- &default_nomad_version "1.3.3"
- "1.2.10"
- "1.1.16"
images:
# When updating the Go version, remember to also update the versions in the
# workflows section for go-test-lib jobs.
@ -105,15 +109,18 @@ commands:
type: env_var_name
default: ROLE_ARN
steps:
# Only run the assume-role command for the main repo. The AWS credentials aren't available for forks.
- run: |
export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}"
export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}"
export ROLE_ARN="${<< parameters.role-arn >>}"
# assume role has duration of 15 min (the minimum allowed)
CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')"
echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV
echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV
echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV
if [[ "${CIRCLE_BRANCH%%/*}/" != "pull/" ]]; then
export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}"
export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}"
export ROLE_ARN="${<< parameters.role-arn >>}"
# assume role has duration of 15 min (the minimum allowed)
CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')"
echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV
echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV
echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV
fi
run-go-test-full:
parameters:
@ -560,17 +567,20 @@ jobs:
- run: make ci.dev-docker
- run: *notify-slack-failure
# Nomad 0.8 builds on go1.10
# Run integration tests on nomad/v0.8.7
nomad-integration-0_8:
nomad-integration-test: &NOMAD_TESTS
docker:
- image: docker.mirror.hashicorp.services/cimg/go:1.10
- image: docker.mirror.hashicorp.services/cimg/go:1.19
parameters:
nomad-version:
type: enum
enum: *supported_nomad_versions
default: *default_nomad_version
environment:
<<: *ENVIRONMENT
NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /home/circleci/go/src/github.com/hashicorp/nomad
NOMAD_VERSION: v0.8.7
NOMAD_VERSION: << parameters.nomad-version >>
steps: &NOMAD_INTEGRATION_TEST_STEPS
- run: git clone https://github.com/hashicorp/nomad.git --branch ${NOMAD_VERSION} ${NOMAD_WORKING_DIR}
- run: git clone https://github.com/hashicorp/nomad.git --branch v${NOMAD_VERSION} ${NOMAD_WORKING_DIR}
# get consul binary
- attach_workspace:
@ -601,16 +611,6 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
# run integration tests on nomad/main
nomad-integration-main:
docker:
- image: docker.mirror.hashicorp.services/cimg/go:1.18
environment:
<<: *ENVIRONMENT
NOMAD_WORKING_DIR: /home/circleci/go/src/github.com/hashicorp/nomad
NOMAD_VERSION: main
steps: *NOMAD_INTEGRATION_TEST_STEPS
# build frontend yarn cache
frontend-cache:
docker:
@ -816,7 +816,7 @@ jobs:
# Get go binary from workspace
- attach_workspace:
at: .
# Build the consul-dev image from the already built binary
# Build the consul:local image from the already built binary
- run:
command: |
sudo rm -rf /usr/local/go
@ -887,8 +887,8 @@ jobs:
- attach_workspace:
at: .
- run: *install-gotestsum
# Build the consul-dev image from the already built binary
- run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
# Build the consul:local image from the already built binary
- run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
- run:
name: Envoy Integration Tests
command: |
@ -902,6 +902,7 @@ jobs:
GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
GOTESTSUM_FORMAT: standard-verbose
COMPOSE_INTERACTIVE_NO_CLI: 1
LAMBDA_TESTS_ENABLED: "true"
# tput complains if this isn't set to something.
TERM: ansi
- store_artifacts:
@ -1117,12 +1118,12 @@ workflows:
- dev-upload-docker:
<<: *dev-upload
context: consul-ci
- nomad-integration-main:
requires:
- dev-build
- nomad-integration-0_8:
- nomad-integration-test:
requires:
- dev-build
matrix:
parameters:
nomad-version: *supported_nomad_versions
- envoy-integration-test:
requires:
- dev-build


@ -16,7 +16,7 @@ jobs:
backport:
if: github.event.pull_request.merged
runs-on: ubuntu-latest
container: hashicorpdev/backport-assistant:0.2.3
container: hashicorpdev/backport-assistant:0.2.5
steps:
- name: Run Backport Assistant for stable-website
run: |
@ -24,6 +24,7 @@ jobs:
env:
BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
BACKPORT_TARGET_TEMPLATE: "stable-website"
BACKPORT_MERGE_COMMIT: true
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Backport changes to latest release branch
run: |


@ -8,6 +8,8 @@ linters:
- ineffassign
- unparam
- forbidigo
- gomodguard
- depguard
issues:
# Disable the default exclude list so that all excludes are explicitly
@ -75,6 +77,30 @@ linters-settings:
# Exclude godoc examples from forbidigo checks.
# Default: true
exclude_godoc_examples: false
gomodguard:
blocked:
# List of blocked modules.
modules:
# Blocked module.
- github.com/hashicorp/net-rpc-msgpackrpc:
recommendations:
- github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc
- github.com/hashicorp/go-msgpack:
recommendations:
- github.com/hashicorp/consul-net-rpc/go-msgpack
depguard:
list-type: denylist
include-go-root: true
# A list of packages for the list type specified.
# Default: []
packages:
- net/rpc
# A list of packages for the list type specified.
# Specify an error message to output when a denied package is used.
# Default: []
packages-with-error-message:
- net/rpc: 'only use forked copy in github.com/hashicorp/consul-net-rpc/net/rpc'
run:
timeout: 10m
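In effect, Go code in the repo must now use the forked RPC packages; an illustrative example (not from the diff) of what these rules allow and deny:

```go
// Package example illustrates the gomodguard/depguard rules above.
package example

import (
	// "net/rpc"                               // denied by depguard: use the forked copy
	// "github.com/hashicorp/go-msgpack/codec" // blocked by gomodguard

	_ "github.com/hashicorp/consul-net-rpc/net/rpc" // recommended fork
)
```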


@ -1,3 +1,10 @@
## 1.13.1 (August 11, 2022)
BUG FIXES:
* agent: Fixed a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)] [[GH-14149](https://github.com/hashicorp/consul/issues/14149)]
* connect: Fixed some spurious issues during peering establishment when a follower is dialed [[GH-14119](https://github.com/hashicorp/consul/issues/14119)]
## 1.12.4 (August 11, 2022)
BUG FIXES:
@ -21,6 +28,9 @@ connect: Terminating gateways with a wildcard service entry should no longer pic
BREAKING CHANGES:
* config-entry: Exporting a specific service name across all namespaces is invalid.
* connect: contains an upgrade compatibility issue when restoring snapshots containing service mesh proxy registrations from pre-1.13 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)]. Fixed in 1.13.1 [[GH-14149](https://github.com/hashicorp/consul/issues/14149)]. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#all-service-mesh-deployments) for more information.
* connect: if using auto-encrypt or auto-config, TLS is required for gRPC communication between Envoy and Consul as of 1.13.0; this TLS for gRPC requirement will be removed in a future 1.13 patch release. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more information.
* connect: if a pre-1.13 Consul agent's HTTPS port was not enabled, upgrading to 1.13 may turn on TLS for gRPC communication for Envoy and Consul depending on the agent's TLS configuration. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#grpc-tls) for more information.
* connect: Removes support for Envoy 1.19 [[GH-13807](https://github.com/hashicorp/consul/issues/13807)]
* telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Before upgrading you should remove this flag from your config if the flag is being used. [[GH-13532](https://github.com/hashicorp/consul/issues/13532)]
@ -937,6 +947,24 @@ NOTES:
* legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive
## 1.9.17 (April 13, 2022)
SECURITY:
* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)]
* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)]
DEPRECATIONS:
* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-12767](https://github.com/hashicorp/consul/issues/12767)]
BUG FIXES:
* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)]
* memberlist: fixes a bug which prevented members from joining a cluster with
large amounts of churn [[GH-253](https://github.com/hashicorp/memberlist/issues/253)] [[GH-12046](https://github.com/hashicorp/consul/issues/12046)]
* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)]
## 1.9.16 (February 28, 2022)
FEATURES:


@ -22,10 +22,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$VERSION \
org.opencontainers.image.version=${VERSION} \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
version=${VERSION}
# This is the location of the releases.
ENV HASHICORP_RELEASES=https://releases.hashicorp.com
@ -110,13 +111,13 @@ CMD ["agent", "-dev", "-client", "0.0.0.0"]
# Remember, this image cannot be built locally.
FROM docker.mirror.hashicorp.services/alpine:3.15 as default
ARG VERSION
ARG PRODUCT_VERSION
ARG BIN_NAME
# PRODUCT_NAME and PRODUCT_VERSION are the name of the software on releases.hashicorp.com
# and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3.
ENV BIN_NAME=$BIN_NAME
ENV VERSION=$VERSION
ENV PRODUCT_VERSION=$PRODUCT_VERSION
ARG PRODUCT_REVISION
ARG PRODUCT_NAME=$BIN_NAME
@ -128,10 +129,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$VERSION \
org.opencontainers.image.version=${PRODUCT_VERSION} \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
version=${PRODUCT_VERSION}
# Set up certificates and base tools.
# libc6-compat is needed to symlink the shared libraries for ARM builds
@ -217,10 +219,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$VERSION \
org.opencontainers.image.version=${PRODUCT_VERSION} \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
version=${PRODUCT_VERSION}
# Copy license for Red Hat certification.
COPY LICENSE /licenses/mozilla.txt
@ -284,4 +287,4 @@ USER 100
# By default you'll get an insecure single-node development server that stores
# everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself.
# Don't use this configuration for production.
CMD ["agent", "-dev", "-client", "0.0.0.0"]
CMD ["agent", "-dev", "-client", "0.0.0.0"]


@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
GOTAGS ?=
GOPATH=$(shell go env GOPATH)
GOARCH?=$(shell go env GOARCH)
MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)
export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@ -129,7 +130,7 @@ export GOLDFLAGS
# Allow skipping docker build during integration tests in CI since we already
# have a built binary
ENVOY_INTEG_DEPS?=dev-docker
ENVOY_INTEG_DEPS?=docker-envoy-integ
ifdef SKIP_DOCKER_BUILD
ENVOY_INTEG_DEPS=noop
endif
@ -152,7 +153,28 @@ dev-docker: linux
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
# 'consul:local' tag is needed to run the integration tests
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
@docker buildx use default && docker buildx build -t 'consul:local' \
--platform linux/$(GOARCH) \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--load \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
check-remote-dev-image-env:
ifndef REMOTE_DEV_IMAGE
$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
endif
remote-docker: check-remote-dev-image-env
$(MAKE) GOARCH=amd64 linux
$(MAKE) GOARCH=arm64 linux
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
--platform linux/amd64,linux/arm64 \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--push \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
endif
# linux builds a linux binary independent of the source platform
# linux builds a linux binary compatible with the source platform
linux:
@mkdir -p ./pkg/bin/linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
@mkdir -p ./pkg/bin/linux_$(GOARCH)
CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
# dist builds binaries for all platforms and packages them for distribution
dist:
@ -324,8 +346,22 @@ consul-docker: go-build-image
ui-docker: ui-build-image
@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui
# Build image used to run integration tests locally.
docker-envoy-integ:
$(MAKE) GOARCH=amd64 linux
docker build \
--platform linux/amd64 $(NOCACHE) $(QUIET) \
-t 'consul:local' \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
$(CURDIR)/pkg/bin/linux_amd64 \
-f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
# Run integration tests.
# Use GO_TEST_FLAGS to run specific tests:
# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"
# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment.
test-envoy-integ: $(ENVOY_INTEG_DEPS)
@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy
.PHONY: test-compat-integ
test-compat-integ: dev-docker


@ -213,7 +213,7 @@ type Agent struct {
// depending on the configuration
delegate delegate
// externalGRPCServer is the gRPC server exposed on the dedicated gRPC port (as
// externalGRPCServer is the gRPC server exposed on dedicated gRPC ports (as
// opposed to the multiplexed "server" port).
externalGRPCServer *grpc.Server
@ -384,18 +384,18 @@ type Agent struct {
// New process the desired options and creates a new Agent.
// This process will
// * parse the config given the config Flags
// * setup logging
// * using predefined logger given in an option
// OR
// * initialize a new logger from the configuration
// including setting up gRPC logging
// * initialize telemetry
// * create a TLS Configurator
// * build a shared connection pool
// * create the ServiceManager
// * setup the NodeID if one isn't provided in the configuration
// * create the AutoConfig object for future use in fully
// - parse the config given the config Flags
// - setup logging
// - using predefined logger given in an option
// OR
// - initialize a new logger from the configuration
// including setting up gRPC logging
// - initialize telemetry
// - create a TLS Configurator
// - build a shared connection pool
// - create the ServiceManager
// - setup the NodeID if one isn't provided in the configuration
// - create the AutoConfig object for future use in fully
// resolving the configuration
func New(bd BaseDeps) (*Agent, error) {
a := Agent{
@ -539,7 +539,7 @@ func (a *Agent) Start(ctx context.Context) error {
// This needs to happen after the initial auto-config is loaded, because TLS
// can only be configured on the gRPC server at the point of creation.
a.buildExternalGRPCServer()
a.externalGRPCServer = external.NewServer(a.logger.Named("grpc.external"))
if err := a.startLicenseManager(ctx); err != nil {
return err
@ -702,11 +702,14 @@ func (a *Agent) Start(ctx context.Context) error {
a.apiServers.Start(srv)
}
// Start gRPC server.
// Start grpc and grpc_tls servers.
if err := a.listenAndServeGRPC(); err != nil {
return err
}
// Start a goroutine to terminate excess xDS sessions.
go a.baseDeps.XDSStreamLimiter.Run(&lib.StopChannelContext{StopCh: a.shutdownCh})
// register watches
if err := a.reloadWatches(a.config); err != nil {
return err
@ -760,15 +763,10 @@ func (a *Agent) Failed() <-chan struct{} {
return a.apiServers.failed
}
func (a *Agent) buildExternalGRPCServer() {
a.externalGRPCServer = external.NewServer(a.logger.Named("grpc.external"), a.tlsConfigurator)
}
func (a *Agent) listenAndServeGRPC() error {
if len(a.config.GRPCAddrs) < 1 {
if len(a.config.GRPCAddrs) < 1 && len(a.config.GRPCTLSAddrs) < 1 {
return nil
}
// TODO(agentless): rather than asserting the concrete type of delegate, we
// should add a method to the Delegate interface to build a ConfigSource.
var cfg xds.ProxyConfigSource = localproxycfg.NewConfigSource(a.proxyConfig)
@ -787,7 +785,6 @@ func (a *Agent) listenAndServeGRPC() error {
}()
cfg = catalogCfg
}
a.xdsServer = xds.NewServer(
a.config.NodeName,
a.logger.Named(logging.Envoy),
@ -797,25 +794,65 @@ func (a *Agent) listenAndServeGRPC() error {
return a.delegate.ResolveTokenAndDefaultMeta(id, nil, nil)
},
a,
a.baseDeps.XDSStreamLimiter,
)
a.xdsServer.Register(a.externalGRPCServer)
ln, err := a.startListeners(a.config.GRPCAddrs)
if err != nil {
return err
// Attempt to spawn listeners
var listeners []net.Listener
start := func(port_name string, addrs []net.Addr, tlsConf *tls.Config) error {
if len(addrs) < 1 {
return nil
}
ln, err := a.startListeners(addrs)
if err != nil {
return err
}
for i := range ln {
// Wrap with TLS, if provided.
if tlsConf != nil {
ln[i] = tls.NewListener(ln[i], tlsConf)
}
listeners = append(listeners, ln[i])
}
for _, l := range ln {
go func(innerL net.Listener) {
a.logger.Info("Started gRPC listeners",
"port_name", port_name,
"address", innerL.Addr().String(),
"network", innerL.Addr().Network(),
)
err := a.externalGRPCServer.Serve(innerL)
if err != nil {
a.logger.Error("gRPC server failed", "port_name", port_name, "error", err)
}
}(l)
}
return nil
}
for _, l := range ln {
go func(innerL net.Listener) {
a.logger.Info("Started gRPC server",
"address", innerL.Addr().String(),
"network", innerL.Addr().Network(),
)
err := a.externalGRPCServer.Serve(innerL)
if err != nil {
a.logger.Error("gRPC server failed", "error", err)
}
}(l)
// The original grpc port may spawn in either plain-text or TLS mode (for backwards compatibility).
// TODO: Simplify this block to only spawn plain-text after 1.14 when deprecated TLS support is removed.
if a.config.GRPCPort > 0 {
// Only allow the grpc port to spawn TLS connections if the other grpc_tls port is NOT defined.
var tlsConf *tls.Config = nil
if a.config.GRPCTLSPort <= 0 && a.tlsConfigurator.GRPCServerUseTLS() {
a.logger.Warn("deprecated gRPC TLS configuration detected. Consider using `ports.grpc_tls` instead")
tlsConf = a.tlsConfigurator.IncomingGRPCConfig()
}
if err := start("grpc", a.config.GRPCAddrs, tlsConf); err != nil {
closeListeners(listeners)
return err
}
}
// Only allow grpc_tls to spawn with a TLS listener.
if a.config.GRPCTLSPort > 0 {
if err := start("grpc_tls", a.config.GRPCTLSAddrs, a.tlsConfigurator.IncomingGRPCConfig()); err != nil {
closeListeners(listeners)
return err
}
}
return nil
}
@ -939,8 +976,9 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
}
srv := &HTTPHandlers{
agent: a,
denylist: NewDenylist(a.config.HTTPBlockEndpoints),
agent: a,
denylist: NewDenylist(a.config.HTTPBlockEndpoints),
proxyTransport: http.DefaultTransport,
}
a.configReloaders = append(a.configReloaders, srv.ReloadConfig)
a.httpHandlers = srv
@ -1202,6 +1240,7 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
cfg.RPCAdvertise = runtimeCfg.RPCAdvertiseAddr
cfg.GRPCPort = runtimeCfg.GRPCPort
cfg.GRPCTLSPort = runtimeCfg.GRPCTLSPort
cfg.Segment = runtimeCfg.SegmentName
if len(runtimeCfg.Segments) > 0 {
@ -1505,7 +1544,9 @@ func (a *Agent) ShutdownAgent() error {
}
// Stop gRPC
a.externalGRPCServer.Stop()
if a.externalGRPCServer != nil {
a.externalGRPCServer.Stop()
}
// Stop the proxy config manager
if a.proxyConfig != nil {
@ -2104,6 +2145,21 @@ func (a *Agent) AddService(req AddServiceRequest) error {
// addServiceLocked adds a service entry to the service manager if enabled, or directly
// to the local state if it is not. This function assumes the state lock is already held.
func (a *Agent) addServiceLocked(req addServiceLockedRequest) error {
// Must auto-assign the port and default checks (if needed) here to avoid race collisions.
if req.Service.LocallyRegisteredAsSidecar {
if req.Service.Port < 1 {
port, err := a.sidecarPortFromServiceIDLocked(req.Service.CompoundServiceID())
if err != nil {
return err
}
req.Service.Port = port
}
// Setup default check if none given.
if len(req.chkTypes) < 1 {
req.chkTypes = sidecarDefaultChecks(req.Service.ID, req.Service.Address, req.Service.Proxy.LocalServiceAddress, req.Service.Port)
}
}
req.Service.EnterpriseMeta.Normalize()
if err := a.validateService(req.Service, req.chkTypes); err != nil {
@ -2231,7 +2287,7 @@ func (a *Agent) addServiceInternal(req addServiceInternalRequest) error {
intervalStr = chkType.Interval.String()
}
if chkType.Timeout != 0 {
timeoutStr = chkType.Interval.String()
timeoutStr = chkType.Timeout.String()
}
check := &structs.HealthCheck{
@ -2556,7 +2612,7 @@ func (a *Agent) removeServiceLocked(serviceID structs.ServiceID, persist bool) e
}
func (a *Agent) removeServiceSidecars(serviceID structs.ServiceID, persist bool) error {
sidecarSID := structs.NewServiceID(sidecarServiceID(serviceID.ID), &serviceID.EnterpriseMeta)
sidecarSID := structs.NewServiceID(sidecarIDFromServiceID(serviceID.ID), &serviceID.EnterpriseMeta)
if sidecar := a.State.Service(sidecarSID); sidecar != nil {
// Double check that it's not just an ID collision and we actually added
// this from a sidecar.
@ -3368,7 +3424,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
}
// Grab and validate sidecar if there is one too
sidecar, sidecarChecks, sidecarToken, err := a.sidecarServiceFromNodeService(ns, service.Token)
sidecar, sidecarChecks, sidecarToken, err := sidecarServiceFromNodeService(ns, service.Token)
if err != nil {
return fmt.Errorf("Failed to validate sidecar for service %q: %v", service.Name, err)
}
@ -4268,7 +4324,10 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources {
sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth))
sources.Intentions = proxycfgglue.ServerIntentions(deps)
sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps)
sources.IntentionUpstreamsDestination = proxycfgglue.ServerIntentionUpstreamsDestination(deps)
sources.InternalServiceDump = proxycfgglue.ServerInternalServiceDump(deps, proxycfgglue.CacheInternalServiceDump(a.cache))
sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps)
sources.ResolvedServiceConfig = proxycfgglue.ServerResolvedServiceConfig(deps, proxycfgglue.CacheResolvedServiceConfig(a.cache))
sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache))
sources.TrustBundle = proxycfgglue.ServerTrustBundle(deps)
sources.TrustBundleList = proxycfgglue.ServerTrustBundleList(deps)


@ -45,7 +45,19 @@ type Self struct {
type XDSSelf struct {
SupportedProxies map[string][]string
Port int
// Port could be used for either TLS or plain-text communication
// up through version 1.14. In order to maintain backwards-compatibility,
// Port will now default to TLS and fall back to the standard port value.
// DEPRECATED: Use Ports field instead
Port int
Ports GRPCPorts
}
// GRPCPorts is used to hold the external GRPC server's port numbers.
type GRPCPorts struct {
// Technically, this port is not always plain-text as of 1.14, but will be in a future release.
Plaintext int
TLS int
}
func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
@ -78,7 +90,16 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
SupportedProxies: map[string][]string{
"envoy": proxysupport.EnvoyVersions,
},
Port: s.agent.config.GRPCPort,
// Prefer the TLS port. See comment on the XDSSelf struct for details.
Port: s.agent.config.GRPCTLSPort,
Ports: GRPCPorts{
Plaintext: s.agent.config.GRPCPort,
TLS: s.agent.config.GRPCTLSPort,
},
}
// Fallback to standard port if TLS is not enabled.
if s.agent.config.GRPCTLSPort <= 0 {
xds.Port = s.agent.config.GRPCPort
}
}
@ -1159,7 +1180,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
}
// See if we have a sidecar to register too
sidecar, sidecarChecks, sidecarToken, err := s.agent.sidecarServiceFromNodeService(ns, token)
sidecar, sidecarChecks, sidecarToken, err := sidecarServiceFromNodeService(ns, token)
if err != nil {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid SidecarService: %s", err)}
}
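For context, a rough client-side sketch (standard library only; field names follow the `Self`/`XDSSelf` structs above and are matched case-insensitively by `encoding/json`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// self mirrors just the xDS portion of the /v1/agent/self response.
type self struct {
	XDS *struct {
		Port  int // deprecated: TLS port when enabled, plain-text port otherwise
		Ports struct {
			Plaintext int
			TLS       int
		}
	}
}

func main() {
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/self")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var s self
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	if s.XDS != nil {
		fmt.Printf("xDS plaintext=%d tls=%d (legacy Port=%d)\n",
			s.XDS.Ports.Plaintext, s.XDS.Ports.TLS, s.XDS.Port)
	}
}
```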


@ -1434,15 +1434,8 @@ func TestAgent_Self(t *testing.T) {
cases := map[string]struct {
hcl string
expectXDS bool
grpcTLS bool
}{
"normal": {
hcl: `
node_meta {
somekey = "somevalue"
}
`,
expectXDS: true,
},
"no grpc": {
hcl: `
node_meta {
@ -1453,13 +1446,35 @@ func TestAgent_Self(t *testing.T) {
}
`,
expectXDS: false,
grpcTLS: false,
},
"plaintext grpc": {
hcl: `
node_meta {
somekey = "somevalue"
}
`,
expectXDS: true,
grpcTLS: false,
},
"tls grpc": {
hcl: `
node_meta {
somekey = "somevalue"
}
`,
expectXDS: true,
grpcTLS: true,
},
}
for name, tc := range cases {
tc := tc
t.Run(name, func(t *testing.T) {
a := NewTestAgent(t, tc.hcl)
a := StartTestAgent(t, TestAgent{
HCL: tc.hcl,
UseGRPCTLS: tc.grpcTLS,
})
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -1487,6 +1502,13 @@ func TestAgent_Self(t *testing.T) {
map[string][]string{"envoy": proxysupport.EnvoyVersions},
val.XDS.SupportedProxies,
)
require.Equal(t, a.Config.GRPCTLSPort, val.XDS.Ports.TLS)
require.Equal(t, a.Config.GRPCPort, val.XDS.Ports.Plaintext)
if tc.grpcTLS {
require.Equal(t, a.Config.GRPCTLSPort, val.XDS.Port)
} else {
require.Equal(t, a.Config.GRPCPort, val.XDS.Port)
}
} else {
require.Nil(t, val.XDS, "xds component should be missing when gRPC is disabled")
@ -3764,7 +3786,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
fmt.Println("TCP Check:= ", v)
}
if hasNoCorrectTCPCheck {
t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
t.Fatalf("Did not find the expected TCP Healthcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
}
require.Equal(t, sidecarSvc, gotSidecar)
})


@ -418,6 +418,9 @@ func testAgent_AddService(t *testing.T, extraHCL string) {
`+extraHCL)
defer a.Shutdown()
duration3s, _ := time.ParseDuration("3s")
duration10s, _ := time.ParseDuration("10s")
tests := []struct {
desc string
srv *structs.NodeService
@ -467,6 +470,50 @@ func testAgent_AddService(t *testing.T, extraHCL string) {
},
},
},
{
"one http check with interval and duration",
&structs.NodeService{
ID: "svcid1",
Service: "svcname1",
Tags: []string{"tag1"},
Weights: nil, // nil weights...
Port: 8100,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
// ... should be populated to avoid "IsSame" returning true during AE.
func(ns *structs.NodeService) {
ns.Weights = &structs.Weights{
Passing: 1,
Warning: 1,
}
},
[]*structs.CheckType{
{
CheckID: "check1",
Name: "name1",
HTTP: "http://localhost:8100/",
Interval: duration10s,
Timeout: duration3s,
Notes: "note1",
},
},
map[string]*structs.HealthCheck{
"check1": {
Node: "node1",
CheckID: "check1",
Name: "name1",
Interval: "10s",
Timeout: "3s",
Status: "critical",
Notes: "note1",
ServiceID: "svcid1",
ServiceName: "svcname1",
ServiceTags: []string{"tag1"},
Type: "http",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
},
{
"multiple checks",
&structs.NodeService{
@ -2095,7 +2142,7 @@ func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) {
run := func(t *testing.T, ca string) {
a := StartTestAgent(t, TestAgent{
UseTLS: true,
UseHTTPS: true,
HCL: `
enable_agent_tls_for_checks = true
@ -2786,7 +2833,7 @@ func TestAgent_DeregisterPersistedSidecarAfterRestart(t *testing.T) {
},
}
connectSrv, _, _, err := a.sidecarServiceFromNodeService(srv, "")
connectSrv, _, _, err := sidecarServiceFromNodeService(srv, "")
require.NoError(t, err)
// First persist the check
@ -2959,11 +3006,24 @@ func testAgent_loadServices_sidecar(t *testing.T, extraHCL string) {
if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
sidecarSvc := requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
// Verify default checks have been added
wantChecks := sidecarDefaultChecks(sidecarSvc.ID, sidecarSvc.Address, sidecarSvc.Proxy.LocalServiceAddress, sidecarSvc.Port)
gotChecks := a.State.ChecksForService(sidecarSvc.CompoundServiceID(), true)
gotChkNames := make(map[string]types.CheckID)
for _, check := range gotChecks {
requireCheckExists(t, a, check.CheckID)
gotChkNames[check.Name] = check.CheckID
}
for _, check := range wantChecks {
chkName := check.Name
require.NotNil(t, gotChkNames[chkName])
}
// Sanity check rabbitmq service should NOT have sidecar info in state since
// it has done its job and should be registration syntax sugar only.
assert.Nil(t, svc.Connect.SidecarService)
@ -3860,7 +3920,7 @@ func TestAgent_reloadWatchesHTTPS(t *testing.T) {
}
t.Parallel()
a := TestAgent{UseTLS: true}
a := TestAgent{UseHTTPS: true}
if err := a.Start(t); err != nil {
t.Fatal(err)
}
@ -5207,7 +5267,7 @@ func TestAgent_AutoEncrypt(t *testing.T) {
server = ` + strconv.Itoa(srv.Config.RPCBindAddr.Port) + `
}
retry_join = ["` + srv.Config.SerfBindAddrLAN.String() + `"]`,
UseTLS: true,
UseHTTPS: true,
})
defer client.Shutdown()

agent/cache/cache.go

@ -37,6 +37,10 @@ import (
var Gauges = []prometheus.GaugeDefinition{
{
Name: []string{"consul", "cache", "entries_count"},
Help: "Deprecated - please use cache_entries_count instead.",
},
{
Name: []string{"cache", "entries_count"},
Help: "Represents the number of entries in this cache.",
},
}
@ -45,18 +49,34 @@ var Gauges = []prometheus.GaugeDefinition{
var Counters = []prometheus.CounterDefinition{
{
Name: []string{"consul", "cache", "bypass"},
Help: "Deprecated - please use cache_bypass instead.",
},
{
Name: []string{"cache", "bypass"},
Help: "Counts how many times a request bypassed the cache because no cache-key was provided.",
},
{
Name: []string{"consul", "cache", "fetch_success"},
Help: "Deprecated - please use cache_fetch_success instead.",
},
{
Name: []string{"cache", "fetch_success"},
Help: "Counts the number of successful fetches by the cache.",
},
{
Name: []string{"consul", "cache", "fetch_error"},
Help: "Deprecated - please use cache_fetch_error instead.",
},
{
Name: []string{"cache", "fetch_error"},
Help: "Counts the number of failed fetches by the cache.",
},
{
Name: []string{"consul", "cache", "evict_expired"},
Help: "Deprecated - please use cache_evict_expired instead.",
},
{
Name: []string{"cache", "evict_expired"},
Help: "Counts the number of expired entries that are evicted.",
},
}
@ -397,6 +417,7 @@ func entryExceedsMaxAge(maxAge time.Duration, entry cacheEntry) bool {
func (c *Cache) getWithIndex(ctx context.Context, r getOptions) (interface{}, ResultMeta, error) {
if r.Info.Key == "" {
metrics.IncrCounter([]string{"consul", "cache", "bypass"}, 1)
metrics.IncrCounter([]string{"cache", "bypass"}, 1)
// If no key is specified, then we do not cache this request.
// Pass directly through to the backend.
@ -443,6 +464,7 @@ RETRY_GET:
meta := ResultMeta{Index: entry.Index}
if first {
metrics.IncrCounter([]string{"consul", "cache", r.TypeEntry.Name, "hit"}, 1)
metrics.IncrCounter([]string{"cache", r.TypeEntry.Name, "hit"}, 1)
meta.Hit = true
}
@ -496,6 +518,7 @@ RETRY_GET:
missKey = "miss_new"
}
metrics.IncrCounter([]string{"consul", "cache", r.TypeEntry.Name, missKey}, 1)
metrics.IncrCounter([]string{"cache", r.TypeEntry.Name, missKey}, 1)
}
// Set our timeout channel if we must
@ -588,6 +611,7 @@ func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ign
entry.Fetching = true
c.entries[key] = entry
metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries)))
metrics.SetGauge([]string{"cache", "entries_count"}, float32(len(c.entries)))
tEntry := r.TypeEntry
// The actual Fetch must be performed in a goroutine.
@ -694,7 +718,9 @@ func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ign
labels := []metrics.Label{{Name: "result_not_modified", Value: strconv.FormatBool(result.NotModified)}}
// TODO(kit): move tEntry.Name to a label on the first write here and deprecate the second write
metrics.IncrCounterWithLabels([]string{"consul", "cache", "fetch_success"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"cache", "fetch_success"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"consul", "cache", tEntry.Name, "fetch_success"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"cache", tEntry.Name, "fetch_success"}, 1, labels)
if result.Index > 0 {
// Reset the attempts counter so we don't have any backoff
@ -728,7 +754,9 @@ func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ign
// TODO(kit): Add tEntry.Name to label on fetch_error and deprecate second write
metrics.IncrCounterWithLabels([]string{"consul", "cache", "fetch_error"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"cache", "fetch_error"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"consul", "cache", tEntry.Name, "fetch_error"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"cache", tEntry.Name, "fetch_error"}, 1, labels)
// Increment attempt counter
attempt++
@ -858,7 +886,9 @@ func (c *Cache) runExpiryLoop() {
// Set some metrics
metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1)
metrics.IncrCounter([]string{"cache", "evict_expired"}, 1)
metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries)))
metrics.SetGauge([]string{"cache", "entries_count"}, float32(len(c.entries)))
c.entriesLock.Unlock()
}
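The additions above all follow one mechanical pattern; a hypothetical refactoring (not in the diff, assuming the `metrics` package already imported by cache.go) that would capture it:

```go
// incrCounterBoth is a sketch of the transition pattern used above: write the
// deprecated "consul"-prefixed metric and the new unprefixed one in tandem
// until the old name can be dropped.
func incrCounterBoth(key []string, val float32) {
	metrics.IncrCounter(append([]string{"consul"}, key...), val)
	metrics.IncrCounter(key, val)
}

func setGaugeBoth(key []string, val float32) {
	metrics.SetGauge(append([]string{"consul"}, key...), val)
	metrics.SetGauge(key, val)
}
```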


@ -125,10 +125,10 @@ type LoadResult struct {
//
// The sources are merged in the following order:
//
// * default configuration
// * config files in alphabetical order
// * command line arguments
// * overrides
// - default configuration
// - config files in alphabetical order
// - command line arguments
// - overrides
//
// The config sources are merged sequentially and later values overwrite
// previously set values. Slice values are merged by concatenating the two slices.
@ -433,6 +433,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
httpsPort := b.portVal("ports.https", c.Ports.HTTPS)
serverPort := b.portVal("ports.server", c.Ports.Server)
grpcPort := b.portVal("ports.grpc", c.Ports.GRPC)
grpcTlsPort := b.portVal("ports.grpc_tls", c.Ports.GRPCTLS)
serfPortLAN := b.portVal("ports.serf_lan", c.Ports.SerfLAN)
serfPortWAN := b.portVal("ports.serf_wan", c.Ports.SerfWAN)
proxyMinPort := b.portVal("ports.proxy_min_port", c.Ports.ProxyMinPort)
@ -563,6 +564,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
httpAddrs := b.makeAddrs(b.expandAddrs("addresses.http", c.Addresses.HTTP), clientAddrs, httpPort)
httpsAddrs := b.makeAddrs(b.expandAddrs("addresses.https", c.Addresses.HTTPS), clientAddrs, httpsPort)
grpcAddrs := b.makeAddrs(b.expandAddrs("addresses.grpc", c.Addresses.GRPC), clientAddrs, grpcPort)
grpcTlsAddrs := b.makeAddrs(b.expandAddrs("addresses.grpc_tls", c.Addresses.GRPCTLS), clientAddrs, grpcTlsPort)
for _, a := range dnsAddrs {
if x, ok := a.(*net.TCPAddr); ok {
@ -987,8 +989,10 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
EnableRemoteScriptChecks: enableRemoteScriptChecks,
EnableLocalScriptChecks: enableLocalScriptChecks,
EncryptKey: stringVal(c.EncryptKey),
GRPCPort: grpcPort,
GRPCAddrs: grpcAddrs,
GRPCPort: grpcPort,
GRPCTLSAddrs: grpcTlsAddrs,
GRPCTLSPort: grpcTlsPort,
HTTPMaxConnsPerClient: intVal(c.Limits.HTTPMaxConnsPerClient),
HTTPSHandshakeTimeout: b.durationVal("limits.https_handshake_timeout", c.Limits.HTTPSHandshakeTimeout),
KVMaxValueSize: uint64Val(c.Limits.KVMaxValueSize),
@ -2531,10 +2535,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza")
}
// TLS is only enabled on the gRPC listener if there's an HTTPS port configured
// for historic and backwards-compatibility reasons.
if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) {
b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)")
// And UseAutoCert right now only applies to external gRPC interface.
if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil {
return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza")
}
defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion)
@ -2591,6 +2594,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
mapCommon("https", t.HTTPS, &c.HTTPS)
mapCommon("grpc", t.GRPC, &c.GRPC)
c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false)
c.ServerName = rt.ServerName
c.NodeName = rt.NodeName


@ -332,10 +332,11 @@ type Consul struct {
}
type Addresses struct {
DNS *string `mapstructure:"dns"`
HTTP *string `mapstructure:"http"`
HTTPS *string `mapstructure:"https"`
GRPC *string `mapstructure:"grpc"`
DNS *string `mapstructure:"dns"`
HTTP *string `mapstructure:"http"`
HTTPS *string `mapstructure:"https"`
GRPC *string `mapstructure:"grpc"`
GRPCTLS *string `mapstructure:"grpc_tls"`
}
type AdvertiseAddrsConfig struct {
@ -694,6 +695,7 @@ type Ports struct {
SerfWAN *int `mapstructure:"serf_wan"`
Server *int `mapstructure:"server"`
GRPC *int `mapstructure:"grpc"`
GRPCTLS *int `mapstructure:"grpc_tls"`
ProxyMinPort *int `mapstructure:"proxy_min_port"`
ProxyMaxPort *int `mapstructure:"proxy_max_port"`
SidecarMinPort *int `mapstructure:"sidecar_min_port"`
@ -867,6 +869,7 @@ type TLSProtocolConfig struct {
VerifyIncoming *bool `mapstructure:"verify_incoming"`
VerifyOutgoing *bool `mapstructure:"verify_outgoing"`
VerifyServerHostname *bool `mapstructure:"verify_server_hostname"`
UseAutoCert *bool `mapstructure:"use_auto_cert"`
}
type TLS struct {


@ -53,7 +53,8 @@ func AddFlags(fs *flag.FlagSet, f *LoadOpts) {
add(&f.FlagValues.EnableLocalScriptChecks, "enable-local-script-checks", "Enables health check scripts from configuration file.")
add(&f.FlagValues.HTTPConfig.AllowWriteHTTPFrom, "allow-write-http-from", "Only allow write endpoint calls from given network. CIDR format, can be specified multiple times.")
add(&f.FlagValues.EncryptKey, "encrypt", "Provides the gossip encryption key.")
add(&f.FlagValues.Ports.GRPC, "grpc-port", "Sets the gRPC API port to listen on (currently needed for Envoy xDS only).")
add(&f.FlagValues.Ports.GRPC, "grpc-port", "Sets the gRPC API port to listen on.")
add(&f.FlagValues.Ports.GRPCTLS, "grpc-tls-port", "Sets the gRPC-TLS API port to listen on.")
add(&f.FlagValues.Ports.HTTP, "http-port", "Sets the HTTP API port to listen on.")
add(&f.FlagValues.Ports.HTTPS, "https-port", "Sets the HTTPS API port to listen on.")
add(&f.FlagValues.StartJoinAddrsLAN, "join", "Address of an agent to join at start time. Can be specified multiple times.")


@ -670,13 +670,18 @@ type RuntimeConfig struct {
// flag: -encrypt string
EncryptKey string
// GRPCPort is the port the gRPC server listens on. Currently this only
// exposes the xDS and ext_authz APIs for Envoy and it is disabled by default.
// GRPCPort is the port the gRPC server listens on. It is disabled by default.
//
// hcl: ports { grpc = int }
// flags: -grpc-port int
GRPCPort int
// GRPCTLSPort is the port the gRPC server listens on. It is disabled by default.
//
// hcl: ports { grpc_tls = int }
// flags: -grpc-tls-port int
GRPCTLSPort int
// GRPCAddrs contains the list of TCP addresses and UNIX sockets the gRPC
// server will bind to. If the gRPC endpoint is disabled (ports.grpc <= 0)
// the list is empty.
@ -692,6 +697,21 @@ type RuntimeConfig struct {
// hcl: client_addr = string addresses { grpc = string } ports { grpc = int }
GRPCAddrs []net.Addr
// GRPCTLSAddrs contains the list of TCP addresses and UNIX sockets the gRPC
// server will bind to. If the gRPC TLS endpoint is disabled (ports.grpc_tls <= 0)
// the list is empty.
//
// The addresses are taken from 'addresses.grpc_tls' which should contain a
// space separated list of ip addresses, UNIX socket paths and/or
// go-sockaddr templates. UNIX socket paths must be written as
// 'unix://<full path>', e.g. 'unix:///var/run/consul-grpc.sock'.
//
// If 'addresses.grpc_tls' was not provided the 'client_addr' addresses are
// used.
//
// hcl: client_addr = string addresses { grpc_tls = string } ports { grpc_tls = int }
GRPCTLSAddrs []net.Addr
// HTTPAddrs contains the list of TCP addresses and UNIX sockets the HTTP
// server will bind to. If the HTTP endpoint is disabled (ports.http <= 0)
// the list is empty.


@ -5516,7 +5516,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
},
})
run(t, testCase{
desc: "tls.grpc without ports.https",
desc: "tls.grpc.use_auto_cert defaults to false",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
"tls": {
"grpc": {}
}
}
`},
hcl: []string{`
tls {
grpc {}
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert defaults to false (II)",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
"tls": {}
}
`},
hcl: []string{`
tls {
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert defaults to false (III)",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
}
`},
hcl: []string{`
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert enabled when true",
args: []string{
`-data-dir=` + dataDir,
},
@ -5524,7 +5587,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
{
"tls": {
"grpc": {
"cert_file": "cert-1234"
"use_auto_cert": true
}
}
}
@ -5532,20 +5595,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
hcl: []string{`
tls {
grpc {
cert_file = "cert-1234"
use_auto_cert = true
}
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.CertFile = "cert-1234"
rt.TLS.GRPC.UseAutoCert = true
},
expectedWarnings: []string{
"tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)",
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert disabled when false",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
"tls": {
"grpc": {
"use_auto_cert": false
}
}
}
`},
hcl: []string{`
tls {
grpc {
use_auto_cert = false
}
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
}
@ -5930,6 +6016,8 @@ func TestLoad_FullConfig(t *testing.T) {
GRPCPort: 4881,
GRPCAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")},
GRPCTLSPort: 5201,
GRPCTLSAddrs: []net.Addr{tcpAddr("23.14.88.19:5201")},
HTTPAddrs: []net.Addr{tcpAddr("83.39.91.39:7999")},
HTTPBlockEndpoints: []string{"RBvAFcGD", "fWOWFznh"},
AllowWriteHTTPFrom: []*net.IPNet{cidr("127.0.0.0/8"), cidr("22.33.44.55/32"), cidr("0.0.0.0/0")},
@ -6340,6 +6428,7 @@ func TestLoad_FullConfig(t *testing.T) {
TLSMinVersion: types.TLSv1_0,
CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA},
VerifyOutgoing: false,
UseAutoCert: true,
},
HTTPS: tlsutil.ProtocolConfig{
VerifyIncoming: true,


@ -192,6 +192,8 @@
"ExposeMinPort": 0,
"GRPCAddrs": [],
"GRPCPort": 0,
"GRPCTLSAddrs": [],
"GRPCTLSPort": 0,
"GossipLANGossipInterval": "0s",
"GossipLANGossipNodes": 0,
"GossipLANProbeInterval": "0s",
@ -374,7 +376,8 @@
"TLSMinVersion": "",
"VerifyIncoming": false,
"VerifyOutgoing": false,
"VerifyServerHostname": false
"VerifyServerHostname": false,
"UseAutoCert": false
},
"HTTPS": {
"CAFile": "",
@ -385,7 +388,8 @@
"TLSMinVersion": "",
"VerifyIncoming": false,
"VerifyOutgoing": false,
"VerifyServerHostname": false
"VerifyServerHostname": false,
"UseAutoCert": false
},
"InternalRPC": {
"CAFile": "",
@ -396,7 +400,8 @@
"TLSMinVersion": "",
"VerifyIncoming": false,
"VerifyOutgoing": false,
"VerifyServerHostname": false
"VerifyServerHostname": false,
"UseAutoCert": false
},
"NodeName": "",
"ServerName": ""
@ -466,4 +471,4 @@
"VersionMetadata": "",
"VersionPrerelease": "",
"Watches": []
}

View File

@ -44,6 +44,7 @@ addresses = {
http = "83.39.91.39"
https = "95.17.17.19"
grpc = "32.31.61.91"
grpc_tls = "23.14.88.19"
}
advertise_addr = "17.99.29.16"
advertise_addr_wan = "78.63.37.19"
@ -320,6 +321,7 @@ ports {
https = 15127
server = 3757
grpc = 4881
grpc_tls = 5201
proxy_min_port = 2000
proxy_max_port = 3000
sidecar_min_port = 8888
@ -697,6 +699,7 @@ tls {
tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
tls_min_version = "TLSv1_0"
verify_incoming = true
use_auto_cert = true
}
}
tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"

View File

@ -44,7 +44,8 @@
"dns": "93.95.95.81",
"http": "83.39.91.39",
"https": "95.17.17.19",
"grpc": "32.31.61.91"
"grpc": "32.31.61.91",
"grpc_tls": "23.14.88.19"
},
"advertise_addr": "17.99.29.16",
"advertise_addr_wan": "78.63.37.19",
@ -320,6 +321,7 @@
"https": 15127,
"server": 3757,
"grpc": 4881,
"grpc_tls": 5201,
"sidecar_min_port": 8888,
"sidecar_max_port": 9999,
"expose_min_port": 1111,
@ -692,7 +694,8 @@
"key_file": "1y4prKjl",
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"tls_min_version": "TLSv1_0",
"verify_incoming": true
"verify_incoming": true,
"use_auto_cert": true
}
},
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",

View File

@ -0,0 +1,243 @@
package configentry
import (
"fmt"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/agent/structs"
)
func ComputeResolvedServiceConfig(
args *structs.ServiceConfigRequest,
upstreamIDs []structs.ServiceID,
legacyUpstreams bool,
entries *ResolvedServiceConfigSet,
logger hclog.Logger,
) (*structs.ServiceConfigResponse, error) {
var thisReply structs.ServiceConfigResponse
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
// TODO(freddy) Refactor this into a smaller set of state store functions
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
// blocking query, this function will be rerun and these state store lookups will both be current.
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
var proxyConfGlobalProtocol string
proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
if proxyConf != nil {
// Apply the proxy defaults to the sidecar's proxy config
mapCopy, err := copystructure.Copy(proxyConf.Config)
if err != nil {
return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
}
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
thisReply.Mode = proxyConf.Mode
thisReply.TransparentProxy = proxyConf.TransparentProxy
thisReply.MeshGateway = proxyConf.MeshGateway
thisReply.Expose = proxyConf.Expose
// Extract the global protocol from proxyConf for upstream configs.
rawProtocol := proxyConf.Config["protocol"]
if rawProtocol != nil {
var ok bool
proxyConfGlobalProtocol, ok = rawProtocol.(string)
if !ok {
return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
}
}
}
serviceConf := entries.GetServiceDefaults(
structs.NewServiceID(args.Name, &args.EnterpriseMeta),
)
if serviceConf != nil {
if serviceConf.Expose.Checks {
thisReply.Expose.Checks = true
}
if len(serviceConf.Expose.Paths) >= 1 {
thisReply.Expose.Paths = serviceConf.Expose.Paths
}
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
}
if serviceConf.Protocol != "" {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = make(map[string]interface{})
}
thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
}
if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
}
if serviceConf.TransparentProxy.DialedDirectly {
thisReply.TransparentProxy.DialedDirectly = serviceConf.TransparentProxy.DialedDirectly
}
if serviceConf.Mode != structs.ProxyModeDefault {
thisReply.Mode = serviceConf.Mode
}
if serviceConf.Destination != nil {
thisReply.Destination = *serviceConf.Destination
}
if serviceConf.MaxInboundConnections > 0 {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = map[string]interface{}{}
}
thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
}
if serviceConf.LocalConnectTimeoutMs > 0 {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = map[string]interface{}{}
}
thisReply.ProxyConfig["local_connect_timeout_ms"] = serviceConf.LocalConnectTimeoutMs
}
if serviceConf.LocalRequestTimeoutMs > 0 {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = map[string]interface{}{}
}
thisReply.ProxyConfig["local_request_timeout_ms"] = serviceConf.LocalRequestTimeoutMs
}
thisReply.Meta = serviceConf.Meta
}
// First collect all upstreams into a set of seen upstreams.
// Upstreams can come from:
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
// - Implicitly from centralized upstream config in service-defaults
seenUpstreams := map[structs.ServiceID]struct{}{}
var (
noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0
// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
// will never be transparent because the service config request does not use the resolved value.
tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
)
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
if noUpstreamArgs && !tproxy {
return &thisReply, nil
}
// First store all upstreams that were provided in the request
for _, sid := range upstreamIDs {
if _, ok := seenUpstreams[sid]; !ok {
seenUpstreams[sid] = struct{}{}
}
}
// Then store upstreams inferred from service-defaults and mapify the overrides.
var (
upstreamConfigs = make(map[structs.ServiceID]*structs.UpstreamConfig)
upstreamDefaults *structs.UpstreamConfig
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
usConfigs = make(map[structs.ServiceID]map[string]interface{})
)
if serviceConf != nil && serviceConf.UpstreamConfig != nil {
for i, override := range serviceConf.UpstreamConfig.Overrides {
if override.Name == "" {
logger.Warn(
"Skipping UpstreamConfig.Overrides entry without a required name field",
"entryIndex", i,
"kind", serviceConf.GetKind(),
"name", serviceConf.GetName(),
"namespace", serviceConf.GetEnterpriseMeta().NamespaceOrEmpty(),
)
continue // skip this impossible condition
}
seenUpstreams[override.ServiceID()] = struct{}{}
upstreamConfigs[override.ServiceID()] = override
}
if serviceConf.UpstreamConfig.Defaults != nil {
upstreamDefaults = serviceConf.UpstreamConfig.Defaults
// Store the upstream defaults under a wildcard key so that they can be applied to
// upstreams that are inferred from intentions and do not have explicit upstream configuration.
cfgMap := make(map[string]interface{})
upstreamDefaults.MergeInto(cfgMap)
wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WithWildcardNamespace())
usConfigs[wildcard] = cfgMap
}
}
for upstream := range seenUpstreams {
resolvedCfg := make(map[string]interface{})
// The protocol of an upstream is resolved in this order:
// 1. Default protocol from proxy-defaults (how all services should be addressed)
// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
// 3. Protocol defined for the upstream in the service-defaults.(upstream_config.defaults|upstream_config.overrides) of the downstream
// (how the downstream wants to address it)
protocol := proxyConfGlobalProtocol
upstreamSvcDefaults := entries.GetServiceDefaults(
structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
)
if upstreamSvcDefaults != nil {
if upstreamSvcDefaults.Protocol != "" {
protocol = upstreamSvcDefaults.Protocol
}
}
if protocol != "" {
resolvedCfg["protocol"] = protocol
}
// Merge centralized defaults for all upstreams before configuration for specific upstreams
if upstreamDefaults != nil {
upstreamDefaults.MergeInto(resolvedCfg)
}
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
// because it is specific to the proxy instance.
//
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_config
// 3. Value from local upstream definition. This last step is done in the client's service manager.
if !args.MeshGateway.IsZero() {
resolvedCfg["mesh_gateway"] = args.MeshGateway
}
if upstreamConfigs[upstream] != nil {
upstreamConfigs[upstream].MergeInto(resolvedCfg)
}
if len(resolvedCfg) > 0 {
usConfigs[upstream] = resolvedCfg
}
}
// don't allocate the slices just to not fill them
if len(usConfigs) == 0 {
return &thisReply, nil
}
if legacyUpstreams {
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
thisReply.UpstreamConfigs = make(map[string]map[string]interface{})
for us, conf := range usConfigs {
thisReply.UpstreamConfigs[us.ID] = conf
}
} else {
thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
for us, conf := range usConfigs {
thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
}
}
return &thisReply, nil
}
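A quick illustration of the protocol precedence described in the comments above. This is a minimal sketch, not code from the change itself: the `ServiceDefaults` map shape is taken from the test file that follows, keying `ProxyDefaults` by partition name is an assumption based on the `GetProxyDefaults` call above, and the entry values are hypothetical.

```go
sid := structs.ServiceID{ID: "db", EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()}
entries := &ResolvedServiceConfigSet{
	// Step 1: proxy-defaults sets a global protocol for every service.
	ProxyDefaults: map[string]*structs.ProxyConfigEntry{
		"default": {Config: map[string]interface{}{"protocol": "http"}},
	},
	// Step 2: the upstream's own service-defaults overrides the global value.
	ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
		sid: {Protocol: "grpc"},
	},
}
// Resolving a downstream that lists "db" as an upstream against these entries
// yields "protocol": "grpc" in that upstream's opaque config; a downstream
// override in upstream_config (step 3) would beat both.
_ = entries
```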

View File

@ -0,0 +1,80 @@
package configentry
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
)
func Test_ComputeResolvedServiceConfig(t *testing.T) {
type args struct {
scReq *structs.ServiceConfigRequest
upstreamIDs []structs.ServiceID
entries *ResolvedServiceConfigSet
}
sid := structs.ServiceID{
ID: "sid",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
tests := []struct {
name string
args args
want *structs.ServiceConfigResponse
}{
{
name: "proxy with maxinboundsconnections",
args: args{
scReq: &structs.ServiceConfigRequest{
Name: "sid",
},
entries: &ResolvedServiceConfigSet{
ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
sid: {
MaxInboundConnections: 20,
},
},
},
},
want: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"max_inbound_connections": 20,
},
},
},
{
name: "proxy with local_connect_timeout_ms and local_request_timeout_ms",
args: args{
scReq: &structs.ServiceConfigRequest{
Name: "sid",
},
entries: &ResolvedServiceConfigSet{
ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
sid: {
MaxInboundConnections: 20,
LocalConnectTimeoutMs: 20000,
LocalRequestTimeoutMs: 30000,
},
},
},
},
want: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"max_inbound_connections": 20,
"local_connect_timeout_ms": 20000,
"local_request_timeout_ms": 30000,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ComputeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
false, tt.args.entries, nil)
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}

View File

@ -66,11 +66,10 @@ type VaultProvider struct {
stopWatcher func()
isPrimary bool
clusterID string
spiffeID *connect.SpiffeIDSigning
logger hclog.Logger
}
func NewVaultProvider(logger hclog.Logger) *VaultProvider {
@ -174,6 +173,11 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {
go v.renewToken(ctx, lifetimeWatcher)
}
// Update the intermediate (managed) PKI mount and role
if err := v.setupIntermediatePKIPath(); err != nil {
return err
}
return nil
}
@ -363,8 +367,8 @@ func (v *VaultProvider) GenerateIntermediateCSR() (string, error) {
}
func (v *VaultProvider) setupIntermediatePKIPath() error {
mountConfig := vaultapi.MountConfigInput{
MaxLeaseTTL: v.config.IntermediateCertTTL.String(),
}
_, err := v.getCA(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath)
@ -373,9 +377,7 @@ func (v *VaultProvider) setupIntermediatePKIPath() error {
err := v.mountNamespaced(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath, &vaultapi.MountInput{
Type: "pki",
Description: "intermediate CA backend for Consul Connect",
Config: vaultapi.MountConfigInput{
MaxLeaseTTL: v.config.IntermediateCertTTL.String(),
},
Config: mountConfig,
})
if err != nil {
return err
@ -383,39 +385,28 @@ func (v *VaultProvider) setupIntermediatePKIPath() error {
} else {
return err
}
} else {
err := v.tuneMountNamespaced(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath, &mountConfig)
if err != nil {
return err
}
}
// Create the role for issuing leaf certs if it doesn't exist yet
rolePath := v.config.IntermediatePKIPath + "roles/" + VaultCALeafCertRole
_, err = v.writeNamespaced(v.config.IntermediatePKINamespace, rolePath, map[string]interface{}{
"allow_any_name": true,
"allowed_uri_sans": "spiffe://*",
"key_type": "any",
"max_ttl": v.config.LeafCertTTL.String(),
"no_store": true,
"require_cn": false,
})
return err
}
func (v *VaultProvider) generateIntermediateCSR() (string, error) {
err := v.setupIntermediatePKIPath()
if err != nil {
return "", err
}
// Generate a new intermediate CSR for the root to sign.
uid, err := connect.CompactUID()
if err != nil {
@ -465,10 +456,6 @@ func (v *VaultProvider) SetIntermediate(intermediatePEM, rootPEM string) error {
// ActiveIntermediate returns the current intermediate certificate.
func (v *VaultProvider) ActiveIntermediate() (string, error) {
if err := v.setupIntermediatePKIPath(); err != nil {
return "", err
}
cert, err := v.getCA(v.config.IntermediatePKINamespace, v.config.IntermediatePKIPath)
// This error is expected when calling initializeSecondaryCA for the
@ -737,6 +724,19 @@ func (v *VaultProvider) mountNamespaced(namespace, path string, mountInfo *vault
return err
}
func (v *VaultProvider) tuneMountNamespaced(namespace, path string, mountConfig *vaultapi.MountConfigInput) error {
defer v.setNamespace(namespace)()
r := v.client.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
if err := r.SetJSONBody(mountConfig); err != nil {
return err
}
resp, err := v.client.RawRequest(r)
if resp != nil {
defer resp.Body.Close()
}
return err
}
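// Aside from the namespace handling above, this is the same operation the
// official Vault client exposes as Sys().TuneMount; an equivalent sketch:
//
//	if err := v.client.Sys().TuneMount("pki-intermediate/", mountConfig); err != nil {
//		return err
//	}
//
// The raw request form is used here, presumably so the call can be wrapped
// with setNamespace like the other namespaced helpers.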
func (v *VaultProvider) unmountNamespaced(namespace, path string) error {
defer v.setNamespace(namespace)()
r := v.client.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path))

View File

@ -19,6 +19,16 @@ import (
"github.com/hashicorp/consul/sdk/testutil/retry"
)
const pkiTestPolicy = `
path "sys/mounts/*"
{
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}
path "pki-intermediate/*"
{
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}`
func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) {
cases := map[string]struct {
rawConfig map[string]interface{}
@ -653,7 +663,7 @@ func TestVaultProvider_ConfigureWithAuthMethod(t *testing.T) {
authMethodType: "userpass",
configureAuthMethodFunc: func(t *testing.T, vaultClient *vaultapi.Client) map[string]interface{} {
_, err := vaultClient.Logical().Write("/auth/userpass/users/test",
map[string]interface{}{"password": "foo", "policies": "admins"})
map[string]interface{}{"password": "foo", "policies": "pki"})
require.NoError(t, err)
return map[string]interface{}{
"Type": "userpass",
@ -667,7 +677,8 @@ func TestVaultProvider_ConfigureWithAuthMethod(t *testing.T) {
{
authMethodType: "approle",
configureAuthMethodFunc: func(t *testing.T, vaultClient *vaultapi.Client) map[string]interface{} {
_, err := vaultClient.Logical().Write("auth/approle/role/my-role", nil)
_, err := vaultClient.Logical().Write("auth/approle/role/my-role",
map[string]interface{}{"token_policies": "pki"})
require.NoError(t, err)
resp, err := vaultClient.Logical().Read("auth/approle/role/my-role/role-id")
require.NoError(t, err)
@ -695,6 +706,9 @@ func TestVaultProvider_ConfigureWithAuthMethod(t *testing.T) {
err := testVault.Client().Sys().EnableAuthWithOptions(c.authMethodType, &vaultapi.EnableAuthOptions{Type: c.authMethodType})
require.NoError(t, err)
err = testVault.Client().Sys().PutPolicy("pki", pkiTestPolicy)
require.NoError(t, err)
authMethodConf := c.configureAuthMethodFunc(t, testVault.Client())
conf := map[string]interface{}{
@ -726,11 +740,18 @@ func TestVaultProvider_RotateAuthMethodToken(t *testing.T) {
testVault := NewTestVaultServer(t)
err := testVault.Client().Sys().PutPolicy("pki", pkiTestPolicy)
require.NoError(t, err)
err = testVault.Client().Sys().EnableAuthWithOptions("approle", &vaultapi.EnableAuthOptions{Type: "approle"})
require.NoError(t, err)
_, err = testVault.Client().Logical().Write("auth/approle/role/my-role",
map[string]interface{}{"token_ttl": "2s", "token_explicit_max_ttl": "2s"})
map[string]interface{}{
"token_ttl": "2s",
"token_explicit_max_ttl": "2s",
"token_policies": "pki",
})
require.NoError(t, err)
resp, err := testVault.Client().Logical().Read("auth/approle/role/my-role/role-id")
require.NoError(t, err)

View File

@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) {
func TestTargetSNI(t *testing.T) {
// empty namespace, empty subset
require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain1))
require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
Datacenter: "foo",
}), testTrustDomain1))
// set namespace, empty subset
require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2,
TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
Namespace: "neighbor",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain2))
// empty namespace, set subset
require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1,
TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", "foo"), testTrustDomain1))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
ServiceSubset: "v2",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain1))
// set namespace, set subset
require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2,
TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
ServiceSubset: "canary",
Namespace: "neighbor",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain2))
}

View File

@ -24,6 +24,8 @@ var (
`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
spiffeIDAgentRegexp = regexp.MustCompile(
`^(?:/ap/([^/]+))?/agent/client/dc/([^/]+)/id/([^/]+)$`)
spiffeIDServerRegexp = regexp.MustCompile(
`^/agent/server/dc/([^/]+)$`)
spiffeIDMeshGatewayRegexp = regexp.MustCompile(
`^(?:/ap/([^/]+))?/gateway/mesh/dc/([^/]+)$`)
)
@ -144,6 +146,19 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
Partition: ap,
Datacenter: dc,
}, nil
} else if v := spiffeIDServerRegexp.FindStringSubmatch(path); v != nil {
dc := v[1]
if input.RawPath != "" {
var err error
if dc, err = url.PathUnescape(v[1]); err != nil {
return nil, fmt.Errorf("Invalid datacenter: %s", err)
}
}
return &SpiffeIDServer{
Host: input.Host,
Datacenter: dc,
}, nil
}
// Test for signing ID

View File

@ -0,0 +1,20 @@
package connect
import (
"fmt"
"net/url"
)
type SpiffeIDServer struct {
Host string
Datacenter string
}
// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDServer) URI() *url.URL {
var result url.URL
result.Scheme = "spiffe"
result.Host = id.Host
result.Path = fmt.Sprintf("/agent/server/dc/%s", id.Datacenter)
return &result
}
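A round-trip sketch from a consumer's perspective (values illustrative): the URI produced here matches `spiffeIDServerRegexp`, so `ParseCertURI` recovers the same host and datacenter.

```go
id := connect.SpiffeIDServer{Host: "1234.consul", Datacenter: "dc1"}
u := id.URI() // spiffe://1234.consul/agent/server/dc/dc1

parsed, err := connect.ParseCertURI(u)
if err != nil {
	panic(err)
}
// parsed is a *connect.SpiffeIDServer with Host and Datacenter intact.
server := parsed.(*connect.SpiffeIDServer)
_ = server
```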

View File

@ -54,6 +54,12 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool {
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.Host) == id.Host()
case *SpiffeIDServer:
// The host component of the service must be an exact match for now under
// ascii case folding (since hostnames are case-insensitive). Later we might
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.Host) == id.Host()
default:
return false
}

View File

@ -78,7 +78,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
want: true,
},
{
name: "service - good midex case",
name: "service - good mixed case",
id: testSigning,
input: &SpiffeIDService{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", Datacenter: "dc1", Service: "WEB"},
want: true,
@ -102,7 +102,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
want: true,
},
{
name: "mesh gateway - good midex case",
name: "mesh gateway - good mixed case",
id: testSigning,
input: &SpiffeIDMeshGateway{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
want: true,
@ -119,6 +119,30 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
input: &SpiffeIDMeshGateway{Host: TestClusterID + ".fake", Datacenter: "dc1"},
want: false,
},
{
name: "server - good",
id: testSigning,
input: &SpiffeIDServer{Host: TestClusterID + ".consul", Datacenter: "dc1"},
want: true,
},
{
name: "server - good mixed case",
id: testSigning,
input: &SpiffeIDServer{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
want: true,
},
{
name: "server - different cluster",
id: testSigning,
input: &SpiffeIDServer{Host: "55555555-4444-3333-2222-111111111111.consul", Datacenter: "dc1"},
want: false,
},
{
name: "server - different TLD",
id: testSigning,
input: &SpiffeIDServer{Host: TestClusterID + ".fake", Datacenter: "dc1"},
want: false,
},
}
for _, tt := range tests {

View File

@ -19,109 +19,118 @@ func TestParseCertURIFromString(t *testing.T) {
ParseError string
}{
{
"invalid scheme",
"http://google.com/",
nil,
"scheme",
Name: "invalid scheme",
URI: "http://google.com/",
Struct: nil,
ParseError: "scheme",
},
{
"basic service ID",
"spiffe://1234.consul/ns/default/dc/dc01/svc/web",
&SpiffeIDService{
Name: "basic service ID",
URI: "spiffe://1234.consul/ns/default/dc/dc01/svc/web",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "default",
Datacenter: "dc01",
Service: "web",
},
"",
ParseError: "",
},
{
"basic service ID with partition",
"spiffe://1234.consul/ap/bizdev/ns/default/dc/dc01/svc/web",
&SpiffeIDService{
Name: "basic service ID with partition",
URI: "spiffe://1234.consul/ap/bizdev/ns/default/dc/dc01/svc/web",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: "bizdev",
Namespace: "default",
Datacenter: "dc01",
Service: "web",
},
"",
ParseError: "",
},
{
"basic agent ID",
"spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
&SpiffeIDAgent{
Name: "basic agent ID",
URI: "spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
Struct: &SpiffeIDAgent{
Host: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Datacenter: "dc1",
Agent: "uuid",
},
"",
ParseError: "",
},
{
"basic agent ID with partition",
"spiffe://1234.consul/ap/bizdev/agent/client/dc/dc1/id/uuid",
&SpiffeIDAgent{
Name: "basic agent ID with partition",
URI: "spiffe://1234.consul/ap/bizdev/agent/client/dc/dc1/id/uuid",
Struct: &SpiffeIDAgent{
Host: "1234.consul",
Partition: "bizdev",
Datacenter: "dc1",
Agent: "uuid",
},
"",
ParseError: "",
},
{
"mesh-gateway with no partition",
"spiffe://1234.consul/gateway/mesh/dc/dc1",
&SpiffeIDMeshGateway{
Name: "basic server",
URI: "spiffe://1234.consul/agent/server/dc/dc1",
Struct: &SpiffeIDServer{
Host: "1234.consul",
Datacenter: "dc1",
},
ParseError: "",
},
{
Name: "mesh-gateway with no partition",
URI: "spiffe://1234.consul/gateway/mesh/dc/dc1",
Struct: &SpiffeIDMeshGateway{
Host: "1234.consul",
Partition: "default",
Datacenter: "dc1",
},
"",
ParseError: "",
},
{
"mesh-gateway with partition",
"spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
&SpiffeIDMeshGateway{
Name: "mesh-gateway with partition",
URI: "spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
Struct: &SpiffeIDMeshGateway{
Host: "1234.consul",
Partition: "bizdev",
Datacenter: "dc1",
},
"",
ParseError: "",
},
{
"service with URL-encoded values",
"spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
&SpiffeIDService{
Name: "service with URL-encoded values",
URI: "spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "foo/bar",
Datacenter: "bar/baz",
Service: "baz/qux",
},
"",
ParseError: "",
},
{
"service with URL-encoded values with partition",
"spiffe://1234.consul/ap/biz%2Fdev/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
&SpiffeIDService{
Name: "service with URL-encoded values with partition",
URI: "spiffe://1234.consul/ap/biz%2Fdev/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
Struct: &SpiffeIDService{
Host: "1234.consul",
Partition: "biz/dev",
Namespace: "foo/bar",
Datacenter: "bar/baz",
Service: "baz/qux",
},
"",
ParseError: "",
},
{
"signing ID",
"spiffe://1234.consul",
&SpiffeIDSigning{
Name: "signing ID",
URI: "spiffe://1234.consul",
Struct: &SpiffeIDSigning{
ClusterID: "1234",
Domain: "consul",
},
"",
ParseError: "",
},
}
@ -139,3 +148,12 @@ func TestParseCertURIFromString(t *testing.T) {
})
}
}
func TestSpiffeIDServer_URI(t *testing.T) {
srv := &SpiffeIDServer{
Host: "1234.consul",
Datacenter: "dc1",
}
require.Equal(t, "spiffe://1234.consul/agent/server/dc/dc1", srv.URI().String())
}

View File

@ -5,6 +5,7 @@ import (
"crypto/x509"
"encoding/base64"
"fmt"
"regexp"
"github.com/hashicorp/consul/acl"
@ -12,6 +13,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/authmethod/ssoauth"
"github.com/hashicorp/consul/agent/dns"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/template"
"github.com/hashicorp/consul/proto/pbautoconf"
@ -51,6 +53,11 @@ type jwtAuthorizer struct {
claimAssertions []string
}
// Reject any quote or whitespace characters that could cause an escape with bexpr.
// This includes an extra single-quote character not specified in the grammar for safety in case it is later added.
// https://github.com/hashicorp/go-bexpr/blob/v0.1.11/grammar/grammar.peg#L188-L191
var invalidSegmentName = regexp.MustCompile("[`'\"\\s]+")
func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfigOptions, error) {
// perform basic JWT Authorization
identity, err := a.validator.ValidateLogin(context.Background(), req.JWT)
@ -59,6 +66,21 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig
return AutoConfigOptions{}, acl.PermissionDenied("Failed JWT authorization: %v", err)
}
// Ensure provided data cannot escape the RHS of a bexpr for security.
// This is not the cleanest way to prevent this behavior. Ideally, the bexpr would allow us to
// inject a variable on the RHS for comparison as well, but it would be a complex change to implement
// that would likely break backwards-compatibility in certain circumstances.
if dns.InvalidNameRe.MatchString(req.Node) {
return AutoConfigOptions{}, fmt.Errorf("Invalid request field. %v = `%v`", "node", req.Node)
}
if invalidSegmentName.MatchString(req.Segment) {
return AutoConfigOptions{}, fmt.Errorf("Invalid request field. %v = `%v`", "segment", req.Segment)
}
if req.Partition != "" && !dns.IsValidLabel(req.Partition) {
return AutoConfigOptions{}, fmt.Errorf("Invalid request field. %v = `%v`", "partition", req.Partition)
}
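// To see what this guards against (illustrative values): a claim assertion
// template like `value.node == "${node}"` is interpolated with the request
// fields collected below. Without these checks, a request with Node set to
//	x" or value.node != "
// would expand the assertion to
//	value.node == "x" or value.node != ""
// which evaluates true for any identity. Rejecting quotes and whitespace in
// the interpolated fields closes that escape before interpolation happens.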
// Ensure that every value in this mapping is safe to interpolate before using it.
varMap := map[string]string{
"node": req.Node,
"segment": req.Segment,
@ -372,9 +394,12 @@ func parseAutoConfigCSR(csr string) (*x509.CertificateRequest, *connect.SpiffeID
return nil, nil, fmt.Errorf("Failed to parse CSR: %w", err)
}
// ensure that a URI SAN is present
if len(x509CSR.URIs) < 1 {
return nil, nil, fmt.Errorf("CSR didn't include any URI SANs")
// ensure that exactly one URI SAN is present
if len(x509CSR.URIs) != 1 {
return nil, nil, fmt.Errorf("CSR SAN contains an invalid number of URIs: %v", len(x509CSR.URIs))
}
if len(x509CSR.EmailAddresses) > 0 {
return nil, nil, fmt.Errorf("CSR SAN does not allow specifying email addresses")
}
// Parse the SPIFFE ID

View File

@ -1,12 +1,17 @@
package consul
import (
"bytes"
"crypto"
crand "crypto/rand"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/url"
"path"
"testing"
"time"
@ -92,9 +97,9 @@ func signJWTWithStandardClaims(t *testing.T, privKey string, claims interface{})
// TestAutoConfigInitialConfiguration is really an integration test of all the moving parts of the AutoConfig.InitialConfiguration RPC.
// Full testing of the individual parts will not be done in this test:
//
// - Any implementations of the AutoConfigAuthorizer interface (although these tests do use the jwtAuthorizer)
// - Each of the individual config generation functions. These can be unit tested separately and should NOT
// require running test servers
func TestAutoConfigInitialConfiguration(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -236,6 +241,29 @@ func TestAutoConfigInitialConfiguration(t *testing.T) {
},
err: "Permission denied: Failed JWT authorization: no known key successfully validated the token signature",
},
"bad-req-node": {
request: &pbautoconf.AutoConfigRequest{
Node: "bad node",
JWT: signJWTWithStandardClaims(t, priv, map[string]interface{}{"consul_node_name": "test-node"}),
},
err: "Invalid request field. node =",
},
"bad-req-segment": {
request: &pbautoconf.AutoConfigRequest{
Node: "test-node",
Segment: "bad segment",
JWT: signJWTWithStandardClaims(t, priv, map[string]interface{}{"consul_node_name": "test-node"}),
},
err: "Invalid request field. segment =",
},
"bad-req-partition": {
request: &pbautoconf.AutoConfigRequest{
Node: "test-node",
Partition: "bad partition",
JWT: signJWTWithStandardClaims(t, priv, map[string]interface{}{"consul_node_name": "test-node"}),
},
err: "Invalid request field. partition =",
},
"claim-assertion-failed": {
request: &pbautoconf.AutoConfigRequest{
Node: "test-node",
@ -850,3 +878,159 @@ func TestAutoConfig_updateJoinAddressesInConfig(t *testing.T) {
backend.AssertExpectations(t)
}
func TestAutoConfig_parseAutoConfigCSR(t *testing.T) {
// createCSR copies the behavior of connect.CreateCSR with some
// customizations to allow for better unit testing.
createCSR := func(tmpl *x509.CertificateRequest, privateKey crypto.Signer) (string, error) {
connect.HackSANExtensionForCSR(tmpl)
bs, err := x509.CreateCertificateRequest(crand.Reader, tmpl, privateKey)
require.NoError(t, err)
var csrBuf bytes.Buffer
err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs})
require.NoError(t, err)
return csrBuf.String(), nil
}
pk, _, err := connect.GeneratePrivateKey()
require.NoError(t, err)
agentURI := connect.SpiffeIDAgent{
Host: "test-host",
Datacenter: "tdc1",
Agent: "test-agent",
}.URI()
tests := []struct {
name string
setup func() string
expectErr string
}{
{
name: "err_garbage_data",
expectErr: "Failed to parse CSR",
setup: func() string { return "garbage" },
},
{
name: "err_not_one_uri",
expectErr: "CSR SAN contains an invalid number of URIs",
setup: func() string {
tmpl := &x509.CertificateRequest{
URIs: []*url.URL{agentURI, agentURI},
SignatureAlgorithm: connect.SigAlgoForKey(pk),
}
csr, err := createCSR(tmpl, pk)
require.NoError(t, err)
return csr
},
},
{
name: "err_email",
expectErr: "CSR SAN does not allow specifying email addresses",
setup: func() string {
tmpl := &x509.CertificateRequest{
URIs: []*url.URL{agentURI},
EmailAddresses: []string{"test@example.com"},
SignatureAlgorithm: connect.SigAlgoForKey(pk),
}
csr, err := createCSR(tmpl, pk)
require.NoError(t, err)
return csr
},
},
{
name: "err_spiffe_parse_uri",
expectErr: "Failed to parse the SPIFFE URI",
setup: func() string {
tmpl := &x509.CertificateRequest{
URIs: []*url.URL{connect.SpiffeIDAgent{}.URI()},
SignatureAlgorithm: connect.SigAlgoForKey(pk),
}
csr, err := createCSR(tmpl, pk)
require.NoError(t, err)
return csr
},
},
{
name: "err_not_agent",
expectErr: "SPIFFE ID is not an Agent ID",
setup: func() string {
spiffe := connect.SpiffeIDService{
Namespace: "tns",
Datacenter: "tdc1",
Service: "test-service",
}
tmpl := &x509.CertificateRequest{
URIs: []*url.URL{spiffe.URI()},
SignatureAlgorithm: connect.SigAlgoForKey(pk),
}
csr, err := createCSR(tmpl, pk)
require.NoError(t, err)
return csr
},
},
{
name: "success",
setup: func() string {
tmpl := &x509.CertificateRequest{
URIs: []*url.URL{agentURI},
SignatureAlgorithm: connect.SigAlgoForKey(pk),
}
csr, err := createCSR(tmpl, pk)
require.NoError(t, err)
return csr
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
req, spif, err := parseAutoConfigCSR(tc.setup())
if tc.expectErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectErr)
} else {
require.NoError(t, err)
// TODO better verification of these
require.NotNil(t, req)
require.NotNil(t, spif)
}
})
}
}
func TestAutoConfig_invalidSegmentName(t *testing.T) {
invalid := []string{
"\n",
"\r",
"\t",
"`",
`'`,
`"`,
` `,
`a b`,
`a'b`,
`a or b`,
`a and b`,
`segment name`,
`segment"name`,
`"segment"name`,
`"segment" name`,
`segment'name'`,
}
valid := []string{
``,
`a`,
`a.b`,
`a.b.c`,
`a-b-c`,
`segment.name`,
}
for _, s := range invalid {
require.True(t, invalidSegmentName.MatchString(s), "incorrect match: %v", s)
}
for _, s := range valid {
require.False(t, invalidSegmentName.MatchString(s), "incorrect match: %v", s)
}
}

View File

@ -55,6 +55,14 @@ func (d *AutopilotDelegate) NotifyState(state *autopilot.State) {
}
d.readyServersPublisher.PublishReadyServersEvents(state)
var readyServers uint32
for _, server := range state.Servers {
if autopilotevents.IsServerReady(server) {
readyServers++
}
}
d.server.xdsCapacityController.SetServerCount(readyServers)
}
func (d *AutopilotDelegate) RemoveFailedServer(srv *autopilot.Server) {

View File

@ -4,6 +4,8 @@ package autopilotevents
import (
acl "github.com/hashicorp/consul/acl"
memdb "github.com/hashicorp/go-memdb"
mock "github.com/stretchr/testify/mock"
structs "github.com/hashicorp/consul/agent/structs"
@ -48,6 +50,36 @@ func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _
return r0, r1, r2
}
// NodeService provides a mock function with given fields: ws, nodeName, serviceID, entMeta, peerName
func (_m *MockStateStore) NodeService(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error) {
ret := _m.Called(ws, nodeName, serviceID, entMeta, peerName)
var r0 uint64
if rf, ok := ret.Get(0).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) uint64); ok {
r0 = rf(ws, nodeName, serviceID, entMeta, peerName)
} else {
r0 = ret.Get(0).(uint64)
}
var r1 *structs.NodeService
if rf, ok := ret.Get(1).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) *structs.NodeService); ok {
r1 = rf(ws, nodeName, serviceID, entMeta, peerName)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*structs.NodeService)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) error); ok {
r2 = rf(ws, nodeName, serviceID, entMeta, peerName)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// NewMockStateStore creates a new instance of MockStateStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockStateStore(t testing.TB) *MockStateStore {
mock := &MockStateStore{}

View File

@ -4,9 +4,11 @@ import (
"fmt"
"net"
"sort"
"strconv"
"sync"
"time"
"github.com/hashicorp/go-memdb"
autopilot "github.com/hashicorp/raft-autopilot"
"github.com/hashicorp/consul/acl"
@ -26,6 +28,7 @@ type ReadyServerInfo struct {
ID string
Address string
TaggedAddresses map[string]string
ExtGRPCPort int
Version string
}
@ -122,6 +125,7 @@ func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
//go:generate mockery --name StateStore --inpackage --filename mock_StateStore_test.go
type StateStore interface {
GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
NodeService(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error)
}
//go:generate mockery --name Publisher --inpackage --filename mock_Publisher_test.go
@ -194,25 +198,32 @@ func (r *ReadyServersEventPublisher) readyServersEvents(state *autopilot.State)
return []stream.Event{r.newReadyServersEvent(servers)}, true
}
// IsServerReady determines whether the given server (from the autopilot state)
// is "ready" - by which we mean that they would be an acceptable target for
// stale queries.
func IsServerReady(srv *autopilot.ServerState) bool {
// All healthy servers are caught up enough to be considered ready.
// Servers with voting rights that are still healthy according to Serf are
// also included as they have likely just fallen behind the leader a little
// after initially replicating state. They are still acceptable targets
// for most stale queries and clients can bound the staleness if necessary.
// Including them is a means to prevent flapping the list of servers we
// advertise as ready and flooding the network with notifications to all
// dataplanes of server updates.
//
// TODO (agentless) for a non-voting server that is still alive but fell
// behind, should we cause it to be removed. For voters we know they were caught
// up at some point but for non-voters we cannot know the same thing.
return srv.Health.Healthy || (srv.HasVotingRights() && srv.Server.NodeStatus == autopilot.NodeAlive)
}
// autopilotStateToReadyServers will iterate through all servers in the autopilot
// state and compile a list of servers which are "ready". Readiness means that
// they would be an acceptable target for stale queries.
func (r *ReadyServersEventPublisher) autopilotStateToReadyServers(state *autopilot.State) EventPayloadReadyServers {
var servers EventPayloadReadyServers
for _, srv := range state.Servers {
if IsServerReady(srv) {
// autopilot information contains addresses in the <host>:<port> form. We only care about the
// the host so we parse it out here and discard the port.
host, err := extractHost(string(srv.Server.Address))
@ -226,6 +237,7 @@ func (r *ReadyServersEventPublisher) autopilotStateToReadyServers(state *autopil
Address: host,
Version: srv.Server.Version,
TaggedAddresses: r.getTaggedAddresses(srv),
ExtGRPCPort: r.getGRPCPort(srv),
})
}
}
@ -254,7 +266,7 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta
// code and reason about and having those addresses be updated within 30s is good enough.
_, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
if err != nil || node == nil {
// no catalog information means we should return a nil address map
return nil
}
@ -276,6 +288,38 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta
return addrs
}
// getGRPCPort will get the external gRPC port for a Consul server.
// Returns 0 if there is none assigned or if an error is encountered.
func (r *ReadyServersEventPublisher) getGRPCPort(srv *autopilot.ServerState) int {
if r.GetStore == nil {
return 0
}
_, n, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
if err != nil || n == nil {
return 0
}
_, ns, err := r.GetStore().NodeService(
nil,
n.Node,
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
)
if err != nil || ns == nil || ns.Meta == nil {
return 0
}
if str, ok := ns.Meta["grpc_port"]; ok {
grpcPort, err := strconv.Atoi(str)
if err == nil {
return grpcPort
}
}
return 0
}
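// The meta key read above comes from the server's own "consul" service
// registration; an illustrative shape (the port value is hypothetical):
//
//	ns := &structs.NodeService{
//		ID:   structs.ConsulServiceID,
//		Meta: map[string]string{"grpc_port": "8502"},
//	}
//
// A missing key or unparsable value deliberately degrades to 0 rather than
// surfacing an error.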
// newReadyServersEvent will create a stream.Event with the provided ready server info.
func (r *ReadyServersEventPublisher) newReadyServersEvent(servers EventPayloadReadyServers) stream.Event {
now := time.Now()

View File

@ -4,6 +4,7 @@ import (
"testing"
time "time"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/raft"
autopilot "github.com/hashicorp/raft-autopilot"
mock "github.com/stretchr/testify/mock"
@ -164,9 +165,21 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-1", TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-1",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
nil,
nil,
)
@ -174,9 +187,21 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-2", TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-2",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
nil,
nil,
)
@ -184,9 +209,119 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-3", TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-3",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
nil,
nil,
)
r := NewReadyServersEventPublisher(Config{
GetStore: func() StateStore { return store },
})
actual := r.autopilotStateToReadyServers(exampleState)
require.ElementsMatch(t, expected, actual)
}
func TestAutopilotStateToReadyServersWithExtGRPCPort(t *testing.T) {
expected := EventPayloadReadyServers{
{
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
Address: "198.18.0.2",
ExtGRPCPort: 1234,
Version: "v1.12.0",
},
{
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
Address: "198.18.0.3",
ExtGRPCPort: 2345,
Version: "v1.12.0",
},
{
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
Address: "198.18.0.4",
ExtGRPCPort: 3456,
Version: "v1.12.0",
},
}
store := &MockStateStore{}
t.Cleanup(func() { store.AssertExpectations(t) })
store.On("GetNodeID",
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-1"},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-1",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.NodeService{Meta: map[string]string{"grpc_port": "1234"}},
nil,
)
store.On("GetNodeID",
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-2"},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-2",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.NodeService{Meta: map[string]string{"grpc_port": "2345"}},
nil,
)
store.On("GetNodeID",
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-3"},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-3",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.NodeService{Meta: map[string]string{"grpc_port": "3456"}},
nil,
)
@ -493,9 +628,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-1", TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-1",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
nil,
nil,
)
@ -503,9 +650,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-2", TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-2",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
nil,
nil,
)
@ -513,9 +672,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Times(2).Return(
uint64(0),
&structs.Node{Node: "node-3", TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
nil,
)
store.On("NodeService",
memdb.WatchSet(nil),
"node-3",
structs.ConsulServiceID,
structs.NodeEnterpriseMetaInDefaultPartition(),
structs.DefaultPeerKeyword,
).Once().Return(
uint64(0),
&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
nil,
nil,
)

View File

@ -565,6 +565,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
return err
}
filter, err := bexpr.CreateFilter(args.Filter, nil, []*structs.ServiceNode{})
if err != nil {
return err
}
// Set reply enterprise metadata after resolving and validating the token so
// that we can properly infer metadata from the token.
reply.EnterpriseMeta = args.EnterpriseMeta
@ -574,10 +579,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var err error
var serviceNodes structs.ServiceNodes
if len(args.NodeMetaFilters) > 0 {
reply.Index, serviceNodes, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
} else {
reply.Index, serviceNodes, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
}
if err != nil {
return err
@ -588,11 +594,43 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
return nil
}
raw, err := filter.Execute(serviceNodes)
if err != nil {
return err
}
reply.Services = servicesTagsByName(raw.(structs.ServiceNodes))
c.srv.filterACLWithAuthorizer(authz, reply)
return nil
})
}
func servicesTagsByName(services []*structs.ServiceNode) structs.Services {
unique := make(map[string]map[string]struct{})
for _, svc := range services {
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return results
}
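// For example, two instances of "redis" with overlapping tags collapse into
// one entry whose value is the tag union (output order is not guaranteed):
//
//	servicesTagsByName(structs.ServiceNodes{
//		{ServiceName: "redis", ServiceTags: []string{"v1"}},
//		{ServiceName: "redis", ServiceTags: []string{"v1", "v2"}},
//	})
//	// => structs.Services{"redis": {"v1", "v2"}}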
// ServiceList is used to query the services in a DC.
// Returns services as a list of ServiceNames.
func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error {

View File

@ -1523,6 +1523,45 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) {
}
}
func TestCatalog_ListServices_Filter(t *testing.T) {
t.Parallel()
_, s1 := testServer(t)
codec := rpcClient(t, s1)
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
// prep the cluster with some data we can use in our filters
registerTestCatalogEntries(t, codec)
// Run the tests against the test server
t.Run("ListServices", func(t *testing.T) {
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
args.Filter = "ServiceName == redis"
out := new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Contains(t, out.Services, "redis")
require.ElementsMatch(t, []string{"v1", "v2"}, out.Services["redis"])
args.Filter = "NodeMeta.os == NoSuchOS"
out = new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Len(t, out.Services, 0)
args.Filter = "NodeMeta.NoSuchMetadata == linux"
out = new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Len(t, out.Services, 0)
args.Filter = "InvalidField == linux"
out = new(structs.IndexedServices)
require.Error(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
})
}
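// The same expression works over the HTTP API's ?filter parameter and through
// the api client; a sketch (assumes a locally running agent):
//
//	client, _ := api.NewClient(api.DefaultConfig())
//	svcs, _, err := client.Catalog().Services(&api.QueryOptions{
//		Filter: "ServiceName == redis",
//	})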
func TestCatalog_ListServices_Blocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

View File

@ -62,6 +62,8 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
return nil, err
}
addSerfMetricsLabels(conf, false, c.config.Segment, c.config.AgentEnterpriseMeta().PartitionOrDefault(), "")
addEnterpriseSerfTags(conf.Tags, c.config.AgentEnterpriseMeta())
conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(c.logger)

View File

@ -18,6 +18,7 @@ import (
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc-external/limiter"
grpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
"github.com/hashicorp/consul/agent/pool"
@ -553,6 +554,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps {
NewRequestRecorderFunc: middleware.NewRequestRecorder,
GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor,
EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c),
XDSStreamLimiter: limiter.NewSessionLimiter(),
}
}
@ -893,8 +895,8 @@ func TestClient_RPC_Timeout(t *testing.T) {
}
})
// waiter will sleep for 101ms which is 1ms more than the DefaultQueryTime
require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 101 * time.Millisecond}))
// Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms)
// so we expect the RPC call to timeout.
@ -903,7 +905,8 @@ func TestClient_RPC_Timeout(t *testing.T) {
require.Error(t, err)
require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
// Blocking requests have a longer timeout (100ms) so this should pass since we
// add the maximum jitter which should be 16ms
out = struct{}{}
err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
QueryOptions: structs.QueryOptions{

View File

@ -133,6 +133,9 @@ type Config struct {
// GRPCPort is the port the public gRPC server listens on.
GRPCPort int
// GRPCTLSPort is the port the public gRPC TLS server listens on.
GRPCTLSPort int
// (Enterprise-only) The network segment this agent is part of.
Segment string
@ -584,6 +587,7 @@ func CloneSerfLANConfig(base *serf.Config) *serf.Config {
cfg.MemberlistConfig.ProbeTimeout = base.MemberlistConfig.ProbeTimeout
cfg.MemberlistConfig.SuspicionMult = base.MemberlistConfig.SuspicionMult
cfg.MemberlistConfig.RetransmitMult = base.MemberlistConfig.RetransmitMult
cfg.MemberlistConfig.MetricLabels = base.MemberlistConfig.MetricLabels
// agent/keyring.go
cfg.MemberlistConfig.Keyring = base.MemberlistConfig.Keyring
@ -593,6 +597,7 @@ func CloneSerfLANConfig(base *serf.Config) *serf.Config {
cfg.ReapInterval = base.ReapInterval
cfg.TombstoneTimeout = base.TombstoneTimeout
cfg.MemberlistConfig.SecretKey = base.MemberlistConfig.SecretKey
cfg.MetricLabels = base.MetricLabels
return cfg
}

View File

@ -12,6 +12,7 @@ import (
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
)
@ -510,7 +511,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
ranOnce = true
}
thisReply, err := computeResolvedServiceConfig(
thisReply, err := configentry.ComputeResolvedServiceConfig(
args,
upstreamIDs,
legacyUpstreams,

View File

@ -1399,8 +1399,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
Protocol: "http",
MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
PassiveHealthCheck: &structs.PassiveHealthCheck{
Interval: 10,
MaxFailures: 2,
EnforcingConsecutive5xx: uintPointer(60),
},
},
Overrides: []*structs.UpstreamConfig{
@ -1432,8 +1433,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
Upstream: wildcard,
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"Interval": int64(10),
"MaxFailures": int64(2),
"EnforcingConsecutive5xx": int64(60),
},
"mesh_gateway": map[string]interface{}{
"Mode": "remote",
@ -1445,8 +1447,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
Upstream: mysql,
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"Interval": int64(10),
"MaxFailures": int64(2),
"EnforcingConsecutive5xx": int64(60),
},
"mesh_gateway": map[string]interface{}{
"Mode": "local",
@ -2507,3 +2510,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) {
})
}
}
func uintPointer(v uint32) *uint32 {
return &v
}

View File

@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
return &resp, nil
}
newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default
@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
},
},
}
@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc1"),
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
33*time.Second,
),
},

View File

@ -8,6 +8,7 @@ import (
"github.com/mitchellh/hashstructure"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error {
if router == nil {
// If no router is configured, move on down the line to the next hop of
// the chain.
node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", ""))
node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{
Service: c.serviceName,
}))
if err != nil {
return err
}
@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error {
)
if dest.ServiceSubset == "" {
node, err = c.getSplitterOrResolverNode(
c.newTarget(svc, "", destNamespace, destPartition, ""),
)
c.newTarget(structs.DiscoveryTargetOpts{
Service: svc,
Namespace: destNamespace,
Partition: destPartition,
},
))
} else {
node, err = c.getResolverNode(
c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""),
c.newTarget(structs.DiscoveryTargetOpts{
Service: svc,
ServiceSubset: dest.ServiceSubset,
Namespace: destNamespace,
Partition: destPartition,
}),
false,
)
}
@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error {
// If we have a router, we'll add a catch-all route at the end to send
// unmatched traffic to the next hop in the chain.
defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), ""))
opts := structs.DiscoveryTargetOpts{
Service: router.Name,
Namespace: router.NamespaceOrDefault(),
Partition: router.PartitionOrDefault(),
}
defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts))
if err != nil {
return err
}
@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.S
}
}
func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
if service == "" {
func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
if opts.Service == "" {
panic("newTarget called with empty service which makes no sense")
}
t := structs.NewDiscoveryTarget(
service,
serviceSubset,
defaultIfEmpty(namespace, c.evaluateInNamespace),
defaultIfEmpty(partition, c.evaluateInPartition),
defaultIfEmpty(datacenter, c.evaluateInDatacenter),
)
if opts.Peer == "" {
opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter)
opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace)
opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition)
} else {
// Don't allow Peer and Datacenter to both be set.
opts.Datacenter = ""
// Peer and Partition cannot both be set.
opts.Partition = acl.PartitionOrDefault("")
// Default the namespace to "default" rather than c.evaluateInNamespace.
opts.Namespace = acl.NamespaceOrDefault(opts.Namespace)
}
// Set default connect SNI. This will be overridden later if the service
// has an explicit SNI value configured in service-defaults.
t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
t := structs.NewDiscoveryTarget(opts)
// Use the same representation for the name. This will NOT be overridden
// later.
t.Name = t.SNI
// We don't have the peer's trust domain yet so we can't construct the SNI.
if opts.Peer == "" {
// Set default connect SNI. This will be overridden later if the service
// has an explicit SNI value configured in service-defaults.
t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
// Use the same representation for the name. This will NOT be overridden
// later.
t.Name = t.SNI
}
prev, ok := c.loadedTargets[t.ID]
if ok {
@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datac
return t
}
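The branch above is the heart of the DiscoveryTargetOpts refactor: local targets inherit the compiler's locality defaults, while peered targets must not carry a datacenter or a non-default partition, and their SNI is deferred because the peer's trust domain is unknown at compile time. A simplified sketch of the defaulting rule, with Opts standing in for structs.DiscoveryTargetOpts:

```go
// Sketch of the peer-vs-local defaulting rule newTarget applies above. The
// Opts struct and the literal defaults are simplified stand-ins, not the
// real types.
package main

import "fmt"

type Opts struct {
	Service, ServiceSubset, Namespace, Partition, Datacenter, Peer string
}

func defaultIfEmpty(v, def string) string {
	if v == "" {
		return def
	}
	return v
}

// normalize mirrors the branch in newTarget: local targets inherit the
// compile-time locality, peered targets must not carry a datacenter or a
// non-default partition because those are meaningless across a peering.
func normalize(o Opts) Opts {
	if o.Peer == "" {
		o.Datacenter = defaultIfEmpty(o.Datacenter, "dc1")
		o.Namespace = defaultIfEmpty(o.Namespace, "default")
		o.Partition = defaultIfEmpty(o.Partition, "default")
	} else {
		o.Datacenter = ""       // Peer and Datacenter are exclusive
		o.Partition = "default" // Peer and Partition are exclusive
		o.Namespace = defaultIfEmpty(o.Namespace, "default")
	}
	return o
}

func main() {
	fmt.Printf("%+v\n", normalize(Opts{Service: "web"}))
	fmt.Printf("%+v\n", normalize(Opts{Service: "web", Peer: "cluster-01", Datacenter: "dc9"}))
}
```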
func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget {
var (
service2 = t.Service
serviceSubset2 = t.ServiceSubset
partition2 = t.Partition
namespace2 = t.Namespace
datacenter2 = t.Datacenter
)
func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
mergedOpts := t.ToDiscoveryTargetOpts()
if service != "" && service != service2 {
service2 = service
if opts.Service != "" && opts.Service != mergedOpts.Service {
mergedOpts.Service = opts.Service
// Reset the chosen subset if we reference a service other than our own.
serviceSubset2 = ""
mergedOpts.ServiceSubset = ""
}
if serviceSubset != "" {
serviceSubset2 = serviceSubset
if opts.ServiceSubset != "" {
mergedOpts.ServiceSubset = opts.ServiceSubset
}
if partition != "" {
partition2 = partition
if opts.Partition != "" {
mergedOpts.Partition = opts.Partition
}
if namespace != "" {
namespace2 = namespace
// Only use explicit Namespace with Peer
if opts.Namespace != "" || opts.Peer != "" {
mergedOpts.Namespace = opts.Namespace
}
if datacenter != "" {
datacenter2 = datacenter
if opts.Datacenter != "" {
mergedOpts.Datacenter = opts.Datacenter
}
mergedOpts.Peer = opts.Peer
return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2)
return c.newTarget(mergedOpts)
}
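rewriteTarget's merge rule, sketched below as a continuation of the Opts example above: explicit override fields win, and redirecting to a different service discards any subset chosen for the old one. The merge function is an illustrative stand-in, not the real method:

```go
// Sketch of rewriteTarget's merge rule, reusing the Opts type from the
// previous sketch.
func merge(base, override Opts) Opts {
	if override.Service != "" && override.Service != base.Service {
		base.Service = override.Service
		base.ServiceSubset = "" // the subset belonged to the old service
	}
	if override.ServiceSubset != "" {
		base.ServiceSubset = override.ServiceSubset
	}
	if override.Partition != "" {
		base.Partition = override.Partition
	}
	// A peered override re-interprets the namespace in the peer's locality,
	// so take it even when empty.
	if override.Namespace != "" || override.Peer != "" {
		base.Namespace = override.Namespace
	}
	if override.Datacenter != "" {
		base.Datacenter = override.Datacenter
	}
	base.Peer = override.Peer
	return base
}
```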
func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) {
@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGra
// fall through to group-resolver
}
node, err := c.getResolverNode(
c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""),
false,
)
opts := structs.DiscoveryTargetOpts{
Service: splitID.ID,
ServiceSubset: split.ServiceSubset,
Namespace: splitID.NamespaceOrDefault(),
Partition: splitID.PartitionOrDefault(),
}
node, err := c.getResolverNode(c.newTarget(opts), false)
if err != nil {
return nil, err
}
@ -881,11 +908,7 @@ RESOLVE_AGAIN:
redirectedTarget := c.rewriteTarget(
target,
redirect.Service,
redirect.ServiceSubset,
redirect.Partition,
redirect.Namespace,
redirect.Datacenter,
redirect.ToDiscoveryTargetOpts(),
)
if redirectedTarget.ID != target.ID {
target = redirectedTarget
@ -895,14 +918,9 @@ RESOLVE_AGAIN:
// Handle default subset.
if target.ServiceSubset == "" && resolver.DefaultSubset != "" {
target = c.rewriteTarget(
target,
"",
resolver.DefaultSubset,
"",
"",
"",
)
target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{
ServiceSubset: resolver.DefaultSubset,
})
goto RESOLVE_AGAIN
}
@ -1027,56 +1045,54 @@ RESOLVE_AGAIN:
failover, ok = f["*"]
}
if ok {
// Determine which failover definitions apply.
var failoverTargets []*structs.DiscoveryTarget
if len(failover.Datacenters) > 0 {
for _, dc := range failover.Datacenters {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(
target,
failover.Service,
failover.ServiceSubset,
target.Partition,
failover.Namespace,
dc,
)
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
} else {
if !ok {
return node, nil
}
// Determine which failover definitions apply.
var failoverTargets []*structs.DiscoveryTarget
if len(failover.Datacenters) > 0 {
opts := failover.ToDiscoveryTargetOpts()
for _, dc := range failover.Datacenters {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(
target,
failover.Service,
failover.ServiceSubset,
target.Partition,
failover.Namespace,
"",
)
opts.Datacenter = dc
failoverTarget := c.rewriteTarget(target, opts)
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
// If we filtered everything out then no point in having a failover.
if len(failoverTargets) > 0 {
df := &structs.DiscoveryFailover{}
node.Resolver.Failover = df
// Take care of doing any redirects or configuration loading
// related to targets by cheating a bit and recursing into
// ourselves.
for _, target := range failoverTargets {
failoverResolveNode, err := c.getResolverNode(target, true)
if err != nil {
return nil, err
}
failoverTarget := failoverResolveNode.Resolver.Target
df.Targets = append(df.Targets, failoverTarget)
} else if len(failover.Targets) > 0 {
for _, t := range failover.Targets {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts())
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
} else {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts())
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
// If we filtered everything out then no point in having a failover.
if len(failoverTargets) > 0 {
df := &structs.DiscoveryFailover{}
node.Resolver.Failover = df
// Take care of doing any redirects or configuration loading
// related to targets by cheating a bit and recursing into
// ourselves.
for _, target := range failoverTargets {
failoverResolveNode, err := c.getResolverNode(target, true)
if err != nil {
return nil, err
}
failoverTarget := failoverResolveNode.Resolver.Target
df.Targets = append(df.Targets, failoverTarget)
}
}
}

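The rewritten block flattens three failover shapes into one failoverTargets slice: a legacy datacenter list fans the same definition out per DC, the new Targets list rewrites each entry independently, and a plain failover rewrites once; self-targets are dropped in every case. Continuing the sketches above, with Failover as a simplified stand-in for structs.ServiceResolverFailover:

```go
// Sketch of the failover expansion order above, reusing Opts and merge from
// the previous sketches.
type Failover struct {
	Service, ServiceSubset, Namespace string
	Datacenters                       []string
	Targets                           []Opts
}

func expandFailover(current Opts, f Failover) []Opts {
	base := Opts{Service: f.Service, ServiceSubset: f.ServiceSubset, Namespace: f.Namespace}
	var out []Opts
	keep := func(o Opts) {
		if o != current { // don't failover to yourself
			out = append(out, o)
		}
	}
	switch {
	case len(f.Datacenters) > 0: // legacy: same definition fanned out per DC
		for _, dc := range f.Datacenters {
			o := base
			o.Datacenter = dc
			keep(merge(current, o))
		}
	case len(f.Targets) > 0: // new: each target rewritten independently
		for _, t := range f.Targets {
			keep(merge(current, t))
		}
	default: // plain failover to a service/subset in the same locality
		keep(merge(current, base))
	}
	return out
}
```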
View File

@ -39,6 +39,7 @@ func TestCompile(t *testing.T) {
"service redirect": testcase_ServiceRedirect(),
"service and subset redirect": testcase_ServiceAndSubsetRedirect(),
"datacenter redirect": testcase_DatacenterRedirect(),
"redirect to cluster peer": testcase_PeerRedirect(),
"datacenter redirect with mesh gateways": testcase_DatacenterRedirect_WithMeshGateways(),
"service failover": testcase_ServiceFailover(),
"service failover through redirect": testcase_ServiceFailoverThroughRedirect(),
@ -46,6 +47,7 @@ func TestCompile(t *testing.T) {
"service and subset failover": testcase_ServiceAndSubsetFailover(),
"datacenter failover": testcase_DatacenterFailover(),
"datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(),
"target failover": testcase_Failover_Targets(),
"noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(),
"resolver with default subset": testcase_Resolve_WithDefaultSubset(),
"default resolver with external sni": testcase_DefaultResolver_ExternalSNI(),
@ -182,7 +184,7 @@ func testcase_JustRouterWithDefaults() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -244,7 +246,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -294,7 +296,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc1", nil),
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
33*time.Second,
),
},
@ -361,7 +363,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -426,7 +428,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc1",
}, nil),
},
}
@ -498,7 +503,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc1", nil),
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
33*time.Second,
),
},
@ -584,8 +589,11 @@ func testcase_RouteBypassesSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"bypass.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
ServiceSubset: "bypass",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == bypass",
}
@ -638,7 +646,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -694,7 +702,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc1", nil),
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
33*time.Second,
),
},
@ -776,12 +784,19 @@ func testcase_SubsetSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
}),
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v1",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 1",
}
@ -855,8 +870,8 @@ func testcase_ServiceSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
},
}
@ -935,7 +950,10 @@ func testcase_SplitBypassesSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "next",
ServiceSubset: "bypassed",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == bypass",
}
@ -973,7 +991,7 @@ func testcase_ServiceRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
},
}
@ -1019,7 +1037,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
@ -1055,7 +1076,51 @@ func testcase_DatacenterRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil),
"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc9",
}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
}
func testcase_PeerRedirect() compileTestCase {
entries := newEntries()
entries.AddResolvers(
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
Peer: "cluster-01",
},
},
)
expect := &structs.CompiledDiscoveryChain{
Protocol: "tcp",
StartNode: "resolver:other.default.default.external.cluster-01",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:other.default.default.external.cluster-01": {
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "other.default.default.external.cluster-01",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "other.default.default.external.cluster-01",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
Peer: "cluster-01",
}, func(t *structs.DiscoveryTarget) {
t.SNI = ""
t.Name = ""
t.Datacenter = ""
}),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1095,7 +1160,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) {
"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc9",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
@ -1134,8 +1202,8 @@ func testcase_ServiceFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1177,8 +1245,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1220,8 +1288,8 @@ func testcase_Resolver_CircularFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1261,8 +1329,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "backup",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == backup",
}
@ -1301,9 +1372,15 @@ func testcase_DatacenterFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil),
"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, nil),
"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc4",
}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1350,17 +1427,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) {
"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) {
"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc4",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
},
}
return compileTestCase{entries: entries, expect: expect}
}
func testcase_Failover_Targets() compileTestCase {
entries := newEntries()
entries.AddProxyDefaults(&structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
},
})
entries.AddResolvers(
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Targets: []structs.ServiceResolverFailoverTarget{
{Datacenter: "dc3"},
{Service: "new-main"},
{Peer: "cluster-01"},
},
},
},
},
)
expect := &structs.CompiledDiscoveryChain{
Protocol: "tcp",
StartNode: "resolver:main.default.default.dc1",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:main.default.default.dc1": {
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "main.default.default.dc1",
Resolver: &structs.DiscoveryResolver{
ConnectTimeout: 5 * time.Second,
Target: "main.default.default.dc1",
Failover: &structs.DiscoveryFailover{
Targets: []string{
"main.default.default.dc3",
"new-main.default.default.dc1",
"main.default.default.external.cluster-01",
},
},
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc3",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Peer: "cluster-01",
}, func(t *structs.DiscoveryTarget) {
t.SNI = ""
t.Name = ""
t.Datacenter = ""
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
@ -1422,7 +1587,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
@ -1452,7 +1620,7 @@ func testcase_DefaultResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1488,7 +1656,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
@ -1530,7 +1698,7 @@ func testcase_ServiceMetaProjection() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -1588,7 +1756,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
},
}
@ -1623,7 +1791,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
},
}
@ -1658,7 +1826,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
@ -1692,7 +1863,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.SNI = "main.some.other.service.mesh"
t.External = true
}),
@ -1857,11 +2028,17 @@ func testcase_MultiDatacenterCanary() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc2": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc2", nil),
newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, nil),
33*time.Second,
),
"main.default.default.dc3": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc3", nil),
newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc3",
}, nil),
33*time.Second,
),
},
@ -2155,27 +2332,42 @@ func testcase_AllBellsAndWhistles() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "redirected",
ServiceSubset: "prod",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "ServiceMeta.env == prod",
}
}),
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v1",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 1",
}
}),
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
}),
"v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v3",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 3",
}
}),
"default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "default-subset",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{OnlyPassing: true}
}),
},
@ -2379,7 +2571,7 @@ func testcase_ResolverProtocolOverride() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect,
@ -2413,7 +2605,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect,
@ -2451,7 +2643,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect,
@ -2685,9 +2877,9 @@ func testcase_LBSplitterAndResolver() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
"baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil),
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
"baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil),
},
}
@ -2743,7 +2935,7 @@ func testcase_LBResolver() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -2791,8 +2983,17 @@ func newEntries() *configentry.DiscoveryChainSet {
}
}
func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, "trustdomain.consul")
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default

View File

@ -21,7 +21,7 @@ import (
var IntentionSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"consul", "intention", "apply"},
Help: "",
Help: "Deprecated - please use intention_apply",
},
{
Name: []string{"intention", "apply"},

View File

@ -153,64 +153,87 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
// we don't support calling this endpoint for a specific peer
if args.PeerName != "" {
return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
}
// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
var maxIndex uint64
// get a local dump for services
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {
return fmt.Errorf("could not get a service dump for local nodes: %w", err)
}
if index > maxIndex {
maxIndex = index
}
reply.Nodes = nodes
// get a list of all peerings
index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("could not list peers for service dump %w", err)
}
if index > maxIndex {
maxIndex = index
}
for _, p := range listedPeerings {
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
// If PeerName is not empty, we return only the imported services from that peer
if args.PeerName != "" {
// get a local dump for services
index, nodes, err := state.ServiceDump(ws,
args.ServiceKind,
args.UseServiceKind,
// Note we fetch imported services with wildcard namespace because imported services' namespaces
// are in a different locality; regardless of our local namespace, we return all imported services
// of the local partition.
args.EnterpriseMeta.WithWildcardNamespace(),
args.PeerName)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
return fmt.Errorf("could not get a service dump for peer %q: %w", args.PeerName, err)
}
if index > maxIndex {
maxIndex = index
}
reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
}
reply.Index = maxIndex
reply.ImportedNodes = nodes
// Get, store, and filter gateway services
idx, gatewayServices, err := state.DumpGatewayServices(ws)
if err != nil {
return err
}
reply.Gateways = gatewayServices
} else {
// otherwise return both local and all imported services
if idx > maxIndex {
maxIndex = idx
}
reply.Index = maxIndex
// get a local dump for services
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {
return fmt.Errorf("could not get a service dump for local nodes: %w", err)
}
raw, err := filter.Execute(reply.Nodes)
if err != nil {
return fmt.Errorf("could not filter local service dump: %w", err)
if index > maxIndex {
maxIndex = index
}
reply.Nodes = nodes
// get a list of all peerings
index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("could not list peers for service dump %w", err)
}
if index > maxIndex {
maxIndex = index
}
for _, p := range listedPeerings {
// Note we fetch imported services with wildcard namespace because imported services' namespaces
// are in a different locality; regardless of our local namespace, we return all imported services
// of the local partition.
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, args.EnterpriseMeta.WithWildcardNamespace(), p.Name)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
}
if index > maxIndex {
maxIndex = index
}
reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
}
// Get, store, and filter gateway services
idx, gatewayServices, err := state.DumpGatewayServices(ws)
if err != nil {
return err
}
reply.Gateways = gatewayServices
if idx > maxIndex {
maxIndex = idx
}
reply.Index = maxIndex
raw, err := filter.Execute(reply.Nodes)
if err != nil {
return fmt.Errorf("could not filter local service dump: %w", err)
}
reply.Nodes = raw.(structs.CheckServiceNodes)
}
reply.Nodes = raw.(structs.CheckServiceNodes)
importedRaw, err := filter.Execute(reply.ImportedNodes)
if err != nil {

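Both branches above fold every state-store read into a single maxIndex so the blocking query resumes from the newest raft index seen, whether that came from the service dump, the peering list, or the gateway dump. A tiny sketch of that bookkeeping; maxIndexOf is an illustrative helper, not a Consul function:

```go
package main

import "fmt"

// maxIndexOf is an illustrative helper: a blocking endpoint that reads
// several tables must report the highest raft index it observed, so that
// clients resume watching from the right point.
func maxIndexOf(indexes ...uint64) uint64 {
	var max uint64
	for _, idx := range indexes {
		if idx > max {
			max = idx
		}
	}
	return max
}

func main() {
	// e.g. indexes from ServiceDump, PeeringList, and DumpGatewayServices
	fmt.Println(maxIndexOf(103, 250, 97)) // 250
}
```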
View File

@ -49,7 +49,7 @@ func kvsPreApply(logger hclog.Logger, srv *Server, authz resolver.Result, op api
return false, err
}
case api.KVGet, api.KVGetTree:
case api.KVGet, api.KVGetTree, api.KVGetOrEmpty:
// Filtering for GETs is done on the output side.
case api.KVCheckSession, api.KVCheckIndex:

View File

@ -1073,9 +1073,11 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri
},
}
grpcPortStr := member.Tags["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err == nil && v > 0 {
service.Meta["grpc_port"] = grpcPortStr
if parts.ExternalGRPCPort > 0 {
service.Meta["grpc_port"] = strconv.Itoa(parts.ExternalGRPCPort)
}
if parts.ExternalGRPCTLSPort > 0 {
service.Meta["grpc_tls_port"] = strconv.Itoa(parts.ExternalGRPCTLSPort)
}
// Attempt to join the consul server

View File

@ -564,22 +564,9 @@ func (c *CAManager) primaryInitialize(provider ca.Provider, conf *structs.CAConf
return nil
}
// Get the highest index
idx, _, err := state.CARoots(nil)
if err != nil {
if err := c.persistNewRootAndConfig(provider, rootCA, conf); err != nil {
return err
}
// Store the root cert in raft
_, err = c.delegate.ApplyCARequest(&structs.CARequest{
Op: structs.CAOpSetRoots,
Index: idx,
Roots: []*structs.CARoot{rootCA},
})
if err != nil {
return fmt.Errorf("raft apply failed: %w", err)
}
c.setCAProvider(provider, rootCA)
c.logger.Info("initialized primary datacenter CA with provider", "provider", conf.Provider)
@ -1098,11 +1085,36 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error {
return fmt.Errorf("error parsing leaf signing cert: %w", err)
}
if err := pruneExpiredIntermediates(caRoot); err != nil {
return err
}
caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem)
caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId)
return nil
}
// pruneExpiredIntermediates removes expired intermediate certificates
// from the given CARoot.
func pruneExpiredIntermediates(caRoot *structs.CARoot) error {
var newIntermediates []string
now := time.Now()
for _, intermediatePEM := range caRoot.IntermediateCerts {
cert, err := connect.ParseCert(intermediatePEM)
if err != nil {
return fmt.Errorf("error parsing leaf signing cert: %w", err)
}
// Only keep the intermediate cert if it's still valid.
if cert.NotAfter.After(now) {
newIntermediates = append(newIntermediates, intermediatePEM)
}
}
caRoot.IntermediateCerts = newIntermediates
return nil
}
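The pruning rule above is just a keep-if-still-valid filter over the PEM blocks. A generic sketch under that assumption, with certLike as an illustrative stand-in for a parsed x509 certificate:

```go
// Generic sketch of the pruning rule above: keep only entries whose
// NotAfter is still in the future.
package main

import (
	"fmt"
	"time"
)

type certLike struct {
	PEM      string
	NotAfter time.Time
}

func pruneExpired(certs []certLike, now time.Time) []certLike {
	var kept []certLike
	for _, c := range certs {
		if c.NotAfter.After(now) {
			kept = append(kept, c)
		}
	}
	return kept
}

func main() {
	now := time.Now()
	certs := []certLike{
		{PEM: "old", NotAfter: now.Add(-time.Hour)}, // expired: dropped
		{PEM: "new", NotAfter: now.Add(time.Hour)},  // still valid: kept
	}
	fmt.Println(pruneExpired(certs, now))
}
```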
// runRenewIntermediate periodically attempts to renew the intermediate cert.
func (c *CAManager) runRenewIntermediate(ctx context.Context) error {
isPrimary := c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter
@ -1381,10 +1393,15 @@ func (l *connectSignRateLimiter) getCSRRateLimiterWithLimit(limit rate.Limit) *r
// identified by the SPIFFE ID in the given CSR's SAN. It performs authorization
// using the given acl.Authorizer.
func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, authz acl.Authorizer) (*structs.IssuedCert, error) {
// Parse the SPIFFE ID from the CSR SAN.
if len(csr.URIs) == 0 {
return nil, connect.InvalidCSRError("CSR SAN does not contain a SPIFFE ID")
// Note that only one SPIFFE ID is allowed currently. If more than one is desired
// in future implementations, then each ID should have authorization checks.
if len(csr.URIs) != 1 {
return nil, connect.InvalidCSRError("CSR SAN contains an invalid number of URIs: %v", len(csr.URIs))
}
if len(csr.EmailAddresses) > 0 {
return nil, connect.InvalidCSRError("CSR SAN does not allow specifying email addresses")
}
// Parse the SPIFFE ID from the CSR SAN.
spiffeID, err := connect.ParseCertURI(csr.URIs[0])
if err != nil {
return nil, err
@ -1426,8 +1443,21 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
case *connect.SpiffeIDServer:
// The authorizer passed in should have unlimited permissions.
if err := allow.ACLWriteAllowed(&authzContext); err != nil {
return nil, err
}
// Verify that the DC in the URI matches us.
// The request must have been issued by a local server.
dc := c.serverConf.Datacenter
if v.Datacenter != dc {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
default:
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID")
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, mesh-gateway, or agent ID")
}
return c.SignCertificate(csr, spiffeID)
@ -1447,9 +1477,11 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
if err != nil {
return nil, err
}
signingID := connect.SpiffeIDSigningForCluster(config.ClusterID)
serviceID, isService := spiffeID.(*connect.SpiffeIDService)
agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent)
serverID, isServer := spiffeID.(*connect.SpiffeIDServer)
mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway)
var entMeta acl.EnterpriseMeta
@ -1468,6 +1500,12 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
}
entMeta.Merge(mgwID.GetEnterpriseMeta())
case isServer:
if !signingID.CanSign(spiffeID) {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
"we are %s", serverID.Host, signingID.Host())
}
entMeta.Normalize()
case isAgent:
// isAgent - if we support more ID types then this would need to be an else if
// here we are just automatically fixing the trust domain. For auto-encrypt and
@ -1494,7 +1532,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
entMeta.Merge(agentID.GetEnterpriseMeta())
default:
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, or mesh gateway ID")
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, server, or mesh gateway ID")
}
commonCfg, err := config.GetCommonConfig()
@ -1583,6 +1621,8 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
case isAgent:
reply.Agent = agentID.Agent
reply.AgentURI = cert.URIs[0].String()
case isServer:
reply.ServerURI = cert.URIs[0].String()
default:
return nil, errors.New("not possible")
}

View File

@ -24,6 +24,7 @@ import (
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
"github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
ca "github.com/hashicorp/consul/agent/connect/ca"
"github.com/hashicorp/consul/agent/consul/fsm"
@ -435,7 +436,6 @@ func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
errorMsg string
}{
{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
// A cert that is not yet valid is OK; assume it will be valid soon enough.
{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},
@ -1043,3 +1043,180 @@ func setupPrimaryCA(t *testing.T, client *vaultapi.Client, path string, rootPEM
require.NoError(t, err, "failed to set signed intermediate")
return lib.EnsureTrailingNewline(buf.String())
}
func TestCAManager_Sign_SpiffeIDServer(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
_, s1 := testServerWithConfig(t)
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
codec := rpcClient(t, s1)
roots := structs.IndexedCARoots{}
retry.Run(t, func(r *retry.R) {
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(r, err)
require.Len(r, roots.Roots, 1)
})
pk, _, err := connect.GeneratePrivateKey()
require.NoError(t, err)
// Request a leaf certificate for a server.
spiffeID := &connect.SpiffeIDServer{
Host: roots.TrustDomain,
Datacenter: "dc1",
}
csr, err := connect.CreateCSR(spiffeID, pk, nil, nil)
require.NoError(t, err)
req := structs.CASignRequest{CSR: csr}
cert := structs.IssuedCert{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
// Verify the chain of trust.
verifyLeafCert(t, roots.Roots[0], cert.CertPEM)
// Verify the Server's URI.
require.Equal(t, fmt.Sprintf("spiffe://%s/agent/server/dc/dc1", roots.TrustDomain), cert.ServerURI)
}
func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) {
conf := DefaultConfig()
conf.PrimaryDatacenter = "dc1"
conf.Datacenter = "dc2"
manager := NewCAManager(nil, nil, testutil.Logger(t), conf)
agentURL := connect.SpiffeIDAgent{
Agent: "test-agent",
Datacenter: conf.PrimaryDatacenter,
Host: "test-host",
}.URI()
serviceURL := connect.SpiffeIDService{
Datacenter: conf.PrimaryDatacenter,
Namespace: "ns1",
Service: "test-service",
}.URI()
meshURL := connect.SpiffeIDMeshGateway{
Datacenter: conf.PrimaryDatacenter,
Host: "test-host",
Partition: "test-partition",
}.URI()
tests := []struct {
name string
expectErr string
getCSR func() *x509.CertificateRequest
authAllow bool
}{
{
name: "err_not_one_uri",
expectErr: "CSR SAN contains an invalid number of URIs",
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{agentURL, agentURL},
}
},
},
{
name: "err_email",
expectErr: "CSR SAN does not allow specifying email addresses",
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{agentURL},
EmailAddresses: []string{"test@example.com"},
}
},
},
{
name: "err_invalid_spiffe_id",
expectErr: "SPIFFE ID is not in the expected format",
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{connect.SpiffeIDAgent{}.URI()},
}
},
},
{
name: "err_service_write_not_allowed",
expectErr: "Permission denied",
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{serviceURL},
}
},
},
{
name: "err_service_different_dc",
expectErr: "SPIFFE ID in CSR from a different datacenter",
authAllow: true,
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{serviceURL},
}
},
},
{
name: "err_agent_write_not_allowed",
expectErr: "Permission denied",
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{agentURL},
}
},
},
{
name: "err_meshgw_write_not_allowed",
expectErr: "Permission denied",
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{meshURL},
}
},
},
{
name: "err_meshgw_different_dc",
expectErr: "SPIFFE ID in CSR from a different datacenter",
authAllow: true,
getCSR: func() *x509.CertificateRequest {
return &x509.CertificateRequest{
URIs: []*url.URL{meshURL},
}
},
},
{
name: "err_invalid_spiffe_type",
expectErr: "SPIFFE ID in CSR must be a service, mesh-gateway, or agent ID",
getCSR: func() *x509.CertificateRequest {
u := connect.SpiffeIDSigning{
ClusterID: "test-cluster-id",
Domain: "test-domain",
}.URI()
return &x509.CertificateRequest{
URIs: []*url.URL{u},
}
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
authz := acl.DenyAll()
if tc.authAllow {
authz = acl.AllowAll()
}
cert, err := manager.AuthorizeAndSignCertificate(tc.getCSR(), authz)
if tc.expectErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectErr)
} else {
require.NoError(t, err)
require.NotNil(t, cert)
}
})
}
}

View File

@ -401,6 +401,18 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)
// Wait for the primary's old intermediate to be pruned after expiring.
oldIntermediate := activeRoot.IntermediateCerts[0]
retry.Run(t, func(r *retry.R) {
store := s1.caManager.delegate.State()
_, storedRoot, err := store.CARootActive(nil)
r.Check(err)
if storedRoot.IntermediateCerts[0] == oldIntermediate {
r.Fatal("old intermediate should be gone")
}
})
}
func patchIntermediateCertRenewInterval(t *testing.T) {
@ -516,6 +528,18 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)
// Wait for dc2's old intermediate to be pruned after expiring.
oldIntermediate := activeRoot.IntermediateCerts[0]
retry.Run(t, func(r *retry.R) {
store := s2.caManager.delegate.State()
_, storedRoot, err := store.CARootActive(nil)
r.Check(err)
if storedRoot.IntermediateCerts[0] == oldIntermediate {
r.Fatal("old intermediate should be gone")
}
})
}
func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
@ -667,6 +691,71 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
require.NoError(t, err)
}
func TestCAManager_Initialize_Vault_KeepOldRoots_Primary(t *testing.T) {
ca.SkipIfVaultNotPresent(t)
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
testVault := ca.NewTestVaultServer(t)
defer testVault.Stop()
dir1pre, s1pre := testServer(t)
defer os.RemoveAll(dir1pre)
defer s1pre.Shutdown()
codec := rpcClient(t, s1pre)
defer codec.Close()
testrpc.WaitForLeader(t, s1pre.RPC, "dc1")
// Update the CA config to use Vault - this should force the generation of a new root cert.
vaultCAConf := &structs.CAConfiguration{
Provider: "vault",
Config: map[string]interface{}{
"Address": testVault.Addr,
"Token": testVault.RootToken,
"RootPKIPath": "pki-root/",
"IntermediatePKIPath": "pki-intermediate/",
},
}
args := &structs.CARequest{
Datacenter: "dc1",
Config: vaultCAConf,
}
var reply interface{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
// Should have 2 roots now.
_, roots, err := s1pre.fsm.State().CARoots(nil)
require.NoError(t, err)
require.Len(t, roots, 2)
// Shutdown s1pre and restart it to trigger the primary CA init.
s1pre.Shutdown()
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.DataDir = s1pre.config.DataDir
c.NodeName = s1pre.config.NodeName
c.NodeID = s1pre.config.NodeID
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Roots should be unchanged
_, rootsAfterRestart, err := s1.fsm.State().CARoots(nil)
require.NoError(t, err)
require.Len(t, rootsAfterRestart, 2)
require.Equal(t, roots[0].ID, rootsAfterRestart[0].ID)
require.Equal(t, roots[1].ID, rootsAfterRestart[1].ID)
}
func TestCAManager_Initialize_Vault_FixesSigningKeyID_Primary(t *testing.T) {
ca.SkipIfVaultNotPresent(t)

View File

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"math"
"strings"
"time"
"github.com/armon/go-metrics"
@ -30,12 +31,29 @@ import (
"github.com/hashicorp/consul/proto/pbpeerstream"
)
var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"}
var leaderExportedServicesCountKeyDeprecated = []string{"consul", "peering", "exported_services"}
var leaderExportedServicesCountKey = []string{"peering", "exported_services"}
var leaderHealthyPeeringKeyDeprecated = []string{"consul", "peering", "healthy"}
var leaderHealthyPeeringKey = []string{"peering", "healthy"}
var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
{
Name: leaderExportedServicesCountKeyDeprecated,
Help: fmt.Sprint("Deprecated - please use ", strings.Join(leaderExportedServicesCountKey, "_")),
},
{
Name: leaderExportedServicesCountKey,
Help: "A gauge that tracks how many services are exported for the peering. " +
"The labels are \"peering\" and, for enterprise, \"partition\". " +
"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
"We emit this metric every 9 seconds",
},
{
Name: leaderHealthyPeeringKeyDeprecated,
Help: fmt.Sprint("Deprecated - please use ", strings.Join(leaderExportedServicesCountKey, "_")),
},
{
Name: leaderHealthyPeeringKey,
Help: "A gauge that tracks how if a peering is healthy (1) or not (0). " +
"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
"We emit this metric every 9 seconds",
},
}
@ -68,6 +86,7 @@ func (s *Server) runPeeringMetrics(ctx context.Context) error {
// "Zero-out" the metric on exit so that when prometheus scrapes this
// metric from a non-leader, it does not get a stale value.
metrics.SetGauge(leaderExportedServicesCountKeyDeprecated, float32(0))
metrics.SetGauge(leaderExportedServicesCountKey, float32(0))
return nil
case <-ticker.C:
@ -85,13 +104,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
}
for _, peer := range peers {
status, found := s.peerStreamServer.StreamStatus(peer.ID)
if !found {
logger.Trace("did not find status for", "peer_name", peer.Name)
continue
}
esc := status.GetExportedServicesCount()
part := peer.Partition
labels := []metrics.Label{
{Name: "peer_name", Value: peer.Name},
@ -101,7 +113,28 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
labels = append(labels, metrics.Label{Name: "partition", Value: part})
}
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
status, found := s.peerStreamServer.StreamStatus(peer.ID)
if found {
// exported services count metric
esc := status.GetExportedServicesCount()
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKeyDeprecated, float32(esc), labels)
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
}
// peering health metric
if status.NeverConnected {
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKeyDeprecated, float32(math.NaN()), labels)
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
} else {
healthy := s.peerStreamServer.Tracker.IsHealthy(status)
healthyInt := 0
if healthy {
healthyInt = 1
}
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKeyDeprecated, float32(healthyInt), labels)
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels)
}
}
return nil
@ -277,13 +310,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
}
// Create a ring buffer to cycle through peer addresses in the retry loop below.
buffer := ring.New(len(peer.PeerServerAddresses))
for _, addr := range peer.PeerServerAddresses {
buffer.Value = addr
buffer = buffer.Next()
}
secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
if err != nil {
return fmt.Errorf("failed to read secret for peering: %w", err)
@ -294,27 +320,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
logger.Trace("establishing stream to peer")
retryCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
streamStatus, err := s.peerStreamTracker.Register(peer.ID)
streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}
streamCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
// Start a goroutine to watch for updates to peer server addresses.
// The latest valid server address can be received from nextServerAddr.
nextServerAddr := make(chan string)
go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr)
// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
go retryLoopBackoffPeering(retryCtx, logger, func() error {
go retryLoopBackoffPeering(streamCtx, logger, func() error {
// Try a new address on each iteration by advancing the ring buffer on errors.
defer func() {
buffer = buffer.Next()
}()
addr, ok := buffer.Value.(string)
if !ok {
return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
}
addr := <-nextServerAddr
logger.Trace("dialing peer", "addr", addr)
conn, err := grpc.DialContext(retryCtx, addr,
conn, err := grpc.DialContext(streamCtx, addr,
// TODO(peering): use a grpc.WithStatsHandler here?)
tlsOption,
// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
@ -331,7 +356,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
defer conn.Close()
client := pbpeerstream.NewPeerStreamServiceClient(conn)
stream, err := client.StreamResources(retryCtx)
stream, err := client.StreamResources(streamCtx)
if err != nil {
return err
}
@ -379,6 +404,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
return nil
}
// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr.
// It loads the server addresses into a ring buffer and cycles through them until:
// 1. streamCtx is cancelled (peer is deleted)
// 2. the peer is modified and the watchset fires.
//
// In case (2) we refetch the peering and rebuild the ring buffer.
func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) {
defer close(nextServerAddr)
// we initialize the ring buffer with the peer passed to `establishStream`
// because the caller has pre-checked `peer.ShouldDial`, guaranteeing
// at least one server address.
//
// IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block.
ringbuf := ring.New(len(peer.PeerServerAddresses))
for _, addr := range peer.PeerServerAddresses {
ringbuf.Value = addr
ringbuf = ringbuf.Next()
}
innerWs := memdb.NewWatchSet()
_, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
if err != nil {
s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.",
"peer_id", peer.ID,
"error", err)
}
fetchAddrs := func() error {
// reinstantiate innerWs to prevent it from growing indefinitely
innerWs = memdb.NewWatchSet()
_, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
if err != nil {
return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err)
}
if !peering.IsActive() {
return fmt.Errorf("peer %q is no longer active", peer.ID)
}
if len(peering.PeerServerAddresses) == 0 {
return fmt.Errorf("peer %q has no addresses to dial", peer.ID)
}
ringbuf = ring.New(len(peering.PeerServerAddresses))
for _, addr := range peering.PeerServerAddresses {
ringbuf.Value = addr
ringbuf = ringbuf.Next()
}
return nil
}
for {
select {
case nextServerAddr <- ringbuf.Value.(string):
ringbuf = ringbuf.Next()
case err := <-innerWs.WatchCh(ctx):
if err != nil {
// context was cancelled
return
}
// watch fired so we refetch the peering and rebuild the ring buffer
if err := fetchAddrs(); err != nil {
s.logger.Warn("watchset for peer was fired but failed to update server addresses",
"peer_id", peer.ID,
"error", err)
}
}
}
}
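
Since the comment above leans on container/ring semantics, a minimal standalone sketch (addresses hypothetical) may help show why the ring must never be empty and how a dial loop can keep pulling "the next" address indefinitely:

```go
package main

import (
	"container/ring"
	"fmt"
)

func main() {
	addrs := []string{"10.0.0.1:8502", "10.0.0.2:8502", "10.0.0.3:8502"}

	// Load the addresses the same way watchPeerServerAddrs does: write the
	// current slot's value, then advance to the next slot.
	buf := ring.New(len(addrs))
	for _, addr := range addrs {
		buf.Value = addr
		buf = buf.Next()
	}

	// Reads wrap around forever, so each failed dial attempt can simply
	// advance the ring to try a different server. An empty ring would leave
	// buf.Value nil and the type assertion below would panic.
	for i := 0; i < 5; i++ {
		fmt.Println(buf.Value.(string)) // .1, .2, .3, .1, .2
		buf = buf.Next()
	}
}
```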
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}
@ -391,6 +484,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error {
// process. This includes deletion of the peerings themselves in addition to any peering data
raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
for {
select {
case <-ctx.Done():
return nil
default:
}
ws := memdb.NewWatchSet()
state := s.fsm.State()
_, peerings, err := s.fsm.State().PeeringListDeleted(ws)
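
The raftLimiter above comes from golang.org/x/time/rate; here is a minimal sketch (the loop body is invented for illustration) of how such a limiter throttles apply operations while still honoring context cancellation:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Mirrors rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate)):
	// a steady rate with an equal burst allowance.
	limiter := rate.NewLimiter(rate.Limit(10), 10)

	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	for i := 0; ; i++ {
		// Wait blocks until a token is available or ctx is done, so
		// cancellation stops the loop promptly instead of spinning.
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("stopping:", err)
			return
		}
		fmt.Println("apply deletion batch", i)
	}
}
```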

View File

@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"math"
"testing"
"time"
@ -17,6 +18,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
@ -24,6 +26,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
@ -37,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
})
}
func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -134,9 +138,11 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
// Delete the peering to trigger the termination sequence.
deleted := &pbpeering.Peering{
ID: p.Peering.ID,
Name: "my-peer-acceptor",
DeletedAt: structs.TimeToProto(time.Now()),
ID: p.Peering.ID,
Name: "my-peer-acceptor",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: p.Peering.PeerServerAddresses,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
dialer.logger.Trace("deleted peering for my-peer-acceptor")
@ -259,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
deleted := &pbpeering.Peering{
ID: p.Peering.PeerID,
Name: "my-peer-dialer",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}
@ -428,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@ -974,6 +982,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
var (
s2PeerID1 = generateUUID()
s2PeerID2 = generateUUID()
s2PeerID3 = generateUUID()
testContextTimeout = 60 * time.Second
lastIdx = uint64(0)
)
@ -1063,6 +1072,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
// mimic tracking exported services
mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})
// pretend that the heartbeat happened
mst2.TrackRecvHeartbeat()
}
// Simulate a peering that never connects
{
p3 := &pbpeering.Peering{
ID: s2PeerID3,
Name: "my-peer-s4",
PeerID: token.PeerID, // doesn't much matter what these values are
PeerCAPems: token.CA,
PeerServerName: token.ServerName,
PeerServerAddresses: token.ServerAddresses,
}
require.True(t, p3.ShouldDial())
lastIdx++
require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3}))
}
// set up a metrics sink
@ -1092,6 +1119,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2))
require.Equal(r, float32(2), metric2.Value) // for d, e services
keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2)
healthyMetric2, ok := intv.Gauges[keyHealthyMetric2]
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2))
require.Equal(r, float32(1), healthyMetric2.Value)
keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3)
healthyMetric3, ok := intv.Gauges[keyHealthyMetric3]
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3))
require.True(r, math.IsNaN(float64(healthyMetric3.Value)))
})
}
@ -1131,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@ -1182,7 +1222,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
}))
require.Never(t, func() bool {
_, found := s1.peerStreamTracker.StreamStatus(peerID)
_, found := s1.peerStreamServer.StreamStatus(peerID)
return found
}, 7*time.Second, 1*time.Second, "peering should not have been established")
}
@ -1343,3 +1383,138 @@ func Test_isFailedPreconditionErr(t *testing.T) {
werr := fmt.Errorf("wrapped: %w", err)
assert.True(t, isFailedPreconditionErr(werr))
}
func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
// We want 1s retries for this test
orig := maxRetryBackoff
maxRetryBackoff = 1
t.Cleanup(func() { maxRetryBackoff = orig })
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
// Create a peering by generating a token
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
acceptorClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-dialer",
}
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
_, dialer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
})
testrpc.WaitForLeader(t, dialer.RPC, "dc2")
// Create a peering at dialer by establishing a peering with acceptor's token
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
dialerClient := pbpeering.NewPeeringServiceClient(conn)
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-acceptor",
PeeringToken: resp.PeeringToken,
}
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
require.True(r, status.Connected)
})
testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// generate a new token from the acceptor
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-dialer",
}
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
require.NoError(t, err)
// we will update the token with bad addresses to assert it doesn't clobber existing ones
token.ServerAddresses = []string{"1.2.3.4:1234"}
badToken, err := acceptor.peeringBackend.EncodeToken(token)
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// Try establishing.
// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-acceptor",
PeeringToken: string(badToken),
}
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
})
testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
// force close the acceptor's gRPC server so the dialer retries with a new address.
acceptor.externalGRPCServer.Stop()
clone := proto.Clone(p.Peering)
updated := clone.(*pbpeering.Peering)
// start with a bad address so we can assert for a specific error
updated.PeerServerAddresses = append([]string{
"bad",
}, p.Peering.PeerServerAddresses...)
// this write will wake up the watch on the leader to refetch server addresses
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))
retry.Run(t, func(r *retry.R) {
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
// We assert for this error to be set which would indicate that we iterated
// through a bad address.
require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address")
require.False(r, status.Connected)
})
})
}

View File

@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"strconv"
"strings"
"testing"
"time"
@ -19,6 +20,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
@ -355,8 +357,10 @@ func TestLeader_CheckServersMeta(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
ports := freeport.GetN(t, 2) // s3 grpc, s3 grpc_tls
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -383,6 +387,8 @@ func TestLeader_CheckServersMeta(t *testing.T) {
c.ACLInitialManagementToken = "root"
c.ACLResolverSettings.ACLDefaultPolicy = "allow"
c.Bootstrap = false
c.GRPCPort = ports[0]
c.GRPCTLSPort = ports[1]
})
defer os.RemoveAll(dir3)
defer s3.Shutdown()
@ -456,6 +462,14 @@ func TestLeader_CheckServersMeta(t *testing.T) {
if newVersion != versionToExpect {
r.Fatalf("Expected version to be updated to %s, was %s", versionToExpect, newVersion)
}
grpcPort := service.Meta["grpc_port"]
if grpcPort != strconv.Itoa(ports[0]) {
r.Fatalf("Expected grpc port to be %d, was %s", ports[0], grpcPort)
}
grpcTLSPort := service.Meta["grpc_tls_port"]
if grpcTLSPort != strconv.Itoa(ports[1]) {
r.Fatalf("Expected grpc tls port to be %d, was %s", ports[1], grpcTLSPort)
}
})
}

View File

@ -3,13 +3,14 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/imdario/mergo"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
)
// mergeNodeServiceWithCentralConfig merges a service instance (NodeService) with the
@ -66,7 +67,7 @@ func mergeNodeServiceWithCentralConfig(
ns.ID, err)
}
defaults, err := computeResolvedServiceConfig(
defaults, err := configentry.ComputeResolvedServiceConfig(
configReq,
upstreams,
false,
@ -87,218 +88,6 @@ func mergeNodeServiceWithCentralConfig(
return cfgIndex, mergedns, nil
}
func computeResolvedServiceConfig(
args *structs.ServiceConfigRequest,
upstreamIDs []structs.ServiceID,
legacyUpstreams bool,
entries *configentry.ResolvedServiceConfigSet,
logger hclog.Logger,
) (*structs.ServiceConfigResponse, error) {
var thisReply structs.ServiceConfigResponse
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
// TODO(freddy) Refactor this into smaller set of state store functions
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
// blocking query, this function will be rerun and these state store lookups will both be current.
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
var proxyConfGlobalProtocol string
proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
if proxyConf != nil {
// Apply the proxy defaults to the sidecar's proxy config
mapCopy, err := copystructure.Copy(proxyConf.Config)
if err != nil {
return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
}
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
thisReply.Mode = proxyConf.Mode
thisReply.TransparentProxy = proxyConf.TransparentProxy
thisReply.MeshGateway = proxyConf.MeshGateway
thisReply.Expose = proxyConf.Expose
// Extract the global protocol from proxyConf for upstream configs.
rawProtocol := proxyConf.Config["protocol"]
if rawProtocol != nil {
var ok bool
proxyConfGlobalProtocol, ok = rawProtocol.(string)
if !ok {
return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
}
}
}
serviceConf := entries.GetServiceDefaults(
structs.NewServiceID(args.Name, &args.EnterpriseMeta),
)
if serviceConf != nil {
if serviceConf.Expose.Checks {
thisReply.Expose.Checks = true
}
if len(serviceConf.Expose.Paths) >= 1 {
thisReply.Expose.Paths = serviceConf.Expose.Paths
}
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
}
if serviceConf.Protocol != "" {
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = make(map[string]interface{})
}
thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
}
if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
}
if serviceConf.TransparentProxy.DialedDirectly {
thisReply.TransparentProxy.DialedDirectly = serviceConf.TransparentProxy.DialedDirectly
}
if serviceConf.Mode != structs.ProxyModeDefault {
thisReply.Mode = serviceConf.Mode
}
if serviceConf.Destination != nil {
thisReply.Destination = *serviceConf.Destination
}
thisReply.Meta = serviceConf.Meta
}
// First collect all upstreams into a set of seen upstreams.
// Upstreams can come from:
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
// - Implicitly from centralized upstream config in service-defaults
seenUpstreams := map[structs.ServiceID]struct{}{}
var (
noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0
// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
// will never be transparent because the service config request does not use the resolved value.
tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
)
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
if noUpstreamArgs && !tproxy {
return &thisReply, nil
}
// First store all upstreams that were provided in the request
for _, sid := range upstreamIDs {
if _, ok := seenUpstreams[sid]; !ok {
seenUpstreams[sid] = struct{}{}
}
}
// Then store upstreams inferred from service-defaults and mapify the overrides.
var (
upstreamConfigs = make(map[structs.ServiceID]*structs.UpstreamConfig)
upstreamDefaults *structs.UpstreamConfig
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
usConfigs = make(map[structs.ServiceID]map[string]interface{})
)
if serviceConf != nil && serviceConf.UpstreamConfig != nil {
for i, override := range serviceConf.UpstreamConfig.Overrides {
if override.Name == "" {
logger.Warn(
"Skipping UpstreamConfig.Overrides entry without a required name field",
"entryIndex", i,
"kind", serviceConf.GetKind(),
"name", serviceConf.GetName(),
"namespace", serviceConf.GetEnterpriseMeta().NamespaceOrEmpty(),
)
continue // skip this impossible condition
}
seenUpstreams[override.ServiceID()] = struct{}{}
upstreamConfigs[override.ServiceID()] = override
}
if serviceConf.UpstreamConfig.Defaults != nil {
upstreamDefaults = serviceConf.UpstreamConfig.Defaults
// Store the upstream defaults under a wildcard key so that they can be applied to
// upstreams that are inferred from intentions and do not have explicit upstream configuration.
cfgMap := make(map[string]interface{})
upstreamDefaults.MergeInto(cfgMap)
wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WithWildcardNamespace())
usConfigs[wildcard] = cfgMap
}
}
for upstream := range seenUpstreams {
resolvedCfg := make(map[string]interface{})
// The protocol of an upstream is resolved in this order:
// 1. Default protocol from proxy-defaults (how all services should be addressed)
// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
// 3. Protocol defined for the upstream in the service-defaults.(upstream_config.defaults|upstream_config.overrides) of the downstream
// (how the downstream wants to address it)
protocol := proxyConfGlobalProtocol
upstreamSvcDefaults := entries.GetServiceDefaults(
structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
)
if upstreamSvcDefaults != nil {
if upstreamSvcDefaults.Protocol != "" {
protocol = upstreamSvcDefaults.Protocol
}
}
if protocol != "" {
resolvedCfg["protocol"] = protocol
}
// Merge centralized defaults for all upstreams before configuration for specific upstreams
if upstreamDefaults != nil {
upstreamDefaults.MergeInto(resolvedCfg)
}
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
// because it is specific to the proxy instance.
//
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_config
// 3. Value from local upstream definition. This last step is done in the client's service manager.
if !args.MeshGateway.IsZero() {
resolvedCfg["mesh_gateway"] = args.MeshGateway
}
if upstreamConfigs[upstream] != nil {
upstreamConfigs[upstream].MergeInto(resolvedCfg)
}
if len(resolvedCfg) > 0 {
usConfigs[upstream] = resolvedCfg
}
}
// don't allocate the slices just to not fill them
if len(usConfigs) == 0 {
return &thisReply, nil
}
if legacyUpstreams {
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
thisReply.UpstreamConfigs = make(map[string]map[string]interface{})
for us, conf := range usConfigs {
thisReply.UpstreamConfigs[us.ID] = conf
}
} else {
thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
for us, conf := range usConfigs {
thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
}
}
return &thisReply, nil
}
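
The protocol precedence documented in the loop above is easy to lose in the surrounding merging code; here is the same two-layer rule in isolation as a minimal sketch (function name hypothetical; the third layer, the downstream's upstream_config defaults/overrides, is merged in afterwards via MergeInto):

```go
package main

import "fmt"

// resolveUpstreamProtocol applies the precedence described above: the
// proxy-defaults global protocol is the baseline, and the upstream
// service's own service-defaults protocol overrides it when set.
func resolveUpstreamProtocol(proxyDefaultsProtocol, upstreamSvcDefaultsProtocol string) string {
	protocol := proxyDefaultsProtocol
	if upstreamSvcDefaultsProtocol != "" {
		protocol = upstreamSvcDefaultsProtocol
	}
	return protocol
}

func main() {
	fmt.Println(resolveUpstreamProtocol("http", ""))     // "http": global default wins
	fmt.Println(resolveUpstreamProtocol("http", "grpc")) // "grpc": upstream's own default wins
	fmt.Println(resolveUpstreamProtocol("", ""))         // "": nothing configured, key omitted
}
```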
// MergeServiceConfig merges the service into defaults to produce the final effective
// config for the specified service.
func MergeServiceConfig(defaults *structs.ServiceConfigResponse, service *structs.NodeService) (*structs.NodeService, error) {

View File

@ -3,10 +3,11 @@ package consul
import (
"testing"
"github.com/hashicorp/consul/agent/structs"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
)
func Test_MergeServiceConfig_TransparentProxy(t *testing.T) {
@ -153,6 +154,12 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) {
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(20),
"MaxFailures": int64(4),
},
},
},
},
},
@ -171,8 +178,8 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) {
DestinationName: "zap",
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"Interval": int64(20),
"MaxFailures": int64(4),
},
"protocol": "grpc",
},

View File

@ -7,6 +7,7 @@ import (
"github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc-external/limiter"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/rpc/middleware"
@ -15,14 +16,15 @@ import (
)
type Deps struct {
EventPublisher *stream.EventPublisher
Logger hclog.InterceptLogger
TLSConfigurator *tlsutil.Configurator
Tokens *token.Store
Router *router.Router
ConnPool *pool.ConnPool
GRPCConnPool GRPCClientConner
LeaderForwarder LeaderForwarder
EventPublisher *stream.EventPublisher
Logger hclog.InterceptLogger
TLSConfigurator *tlsutil.Configurator
Tokens *token.Store
Router *router.Router
ConnPool *pool.ConnPool
GRPCConnPool GRPCClientConner
LeaderForwarder LeaderForwarder
XDSStreamLimiter *limiter.SessionLimiter
// GetNetRPCInterceptorFunc, if not nil, sets the net/rpc rpc.ServerServiceCallInterceptor on
// the server side to record metrics around the RPC requests. If nil, no interceptor is added to
// the rpc server.

View File

@ -66,11 +66,19 @@ func (b *PeeringBackend) GetServerAddresses() ([]string, error) {
}
var addrs []string
for _, node := range nodes {
grpcPortStr := node.ServiceMeta["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err != nil || v < 1 {
continue // skip server that isn't exporting public gRPC properly
// Prefer the TLS port if it is defined.
grpcPortStr := node.ServiceMeta["grpc_tls_port"]
if v, err := strconv.Atoi(grpcPortStr); err == nil && v > 0 {
addrs = append(addrs, node.Address+":"+grpcPortStr)
continue
}
addrs = append(addrs, node.Address+":"+grpcPortStr)
// Fallback to the standard port if TLS is not defined.
grpcPortStr = node.ServiceMeta["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err == nil && v > 0 {
addrs = append(addrs, node.Address+":"+grpcPortStr)
continue
}
// Skip the node if neither port is defined.
}
if len(addrs) == 0 {
return nil, fmt.Errorf("a grpc bind port must be specified in the configuration for all servers")
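
A small standalone sketch of the port-selection rule above (helper name and inputs hypothetical), showing how the TLS port wins when both are present and how a node with neither port is skipped:

```go
package main

import (
	"fmt"
	"strconv"
)

// pickGRPCAddr mirrors the loop above: prefer grpc_tls_port when it parses
// to a positive port number, fall back to grpc_port, else report no match.
func pickGRPCAddr(address string, meta map[string]string) (string, bool) {
	for _, key := range []string{"grpc_tls_port", "grpc_port"} {
		if v, err := strconv.Atoi(meta[key]); err == nil && v > 0 {
			return address + ":" + meta[key], true
		}
	}
	return "", false
}

func main() {
	fmt.Println(pickGRPCAddr("10.0.0.1", map[string]string{"grpc_tls_port": "8503", "grpc_port": "8502"})) // 10.0.0.1:8503 true
	fmt.Println(pickGRPCAddr("10.0.0.2", map[string]string{"grpc_port": "8502"}))                          // 10.0.0.2:8502 true
	fmt.Println(pickGRPCAddr("10.0.0.3", nil))                                                             // "" false
}
```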

View File

@ -39,6 +39,7 @@ import (
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/consul/usagemetrics"
"github.com/hashicorp/consul/agent/consul/wanfed"
"github.com/hashicorp/consul/agent/consul/xdscapacity"
aclgrpc "github.com/hashicorp/consul/agent/grpc-external/services/acl"
"github.com/hashicorp/consul/agent/grpc-external/services/connectca"
"github.com/hashicorp/consul/agent/grpc-external/services/dataplane"
@ -253,7 +254,7 @@ type Server struct {
// enable RPC forwarding.
externalConnectCAServer *connectca.Server
// externalGRPCServer is the gRPC server exposed on the dedicated gRPC port, as
// externalGRPCServer has a gRPC server exposed on the dedicated gRPC ports, as
// opposed to the multiplexed "server" port which is served by grpcHandler.
externalGRPCServer *grpc.Server
@ -370,14 +371,17 @@ type Server struct {
// peerStreamServer is a server used to handle peering streams from external clusters.
peerStreamServer *peerstream.Server
// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
peeringServer *peering.Server
peerStreamTracker *peerstream.Tracker
peeringServer *peering.Server
// xdsCapacityController controls the number of concurrent xDS streams the
// server is able to handle.
xdsCapacityController *xdscapacity.Controller
// embedded struct to hold all the enterprise specific data
EnterpriseServer
}
type connHandler interface {
Run() error
Handle(conn net.Conn)
@ -724,11 +728,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
Logger: logger.Named("grpc-api.server-discovery"),
}).Register(s.externalGRPCServer)
s.peerStreamTracker = peerstream.NewTracker()
s.peeringBackend = NewPeeringBackend(s)
s.peerStreamServer = peerstream.NewServer(peerstream.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
GetStore: func() peerstream.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.peerstream"),
ACLResolver: s.ACLResolver,
@ -752,6 +754,13 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
s.grpcLeaderForwarder = flat.LeaderForwarder
go s.trackLeaderChanges()
s.xdsCapacityController = xdscapacity.NewController(xdscapacity.Config{
Logger: s.logger.Named(logging.XDSCapacityController),
GetStore: func() xdscapacity.Store { return s.fsm.State() },
SessionLimiter: flat.XDSStreamLimiter,
})
go s.xdsCapacityController.Run(&lib.StopChannelContext{StopCh: s.shutdownCh})
// Initialize Autopilot. This must happen before starting leadership monitoring
// as establishing leadership could attempt to use autopilot and cause a panic.
s.initAutopilot(config)
@ -790,7 +799,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
p := peering.NewServer(peering.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
Tracker: s.peerStreamServer.Tracker,
Logger: deps.Logger.Named("grpc-api.peering"),
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
// Only forward the request if the dc in the request matches the server's datacenter.
@ -1361,6 +1370,11 @@ func (s *Server) WANMembers() []serf.Member {
return s.serfWAN.Members()
}
// GetPeeringBackend is a test helper.
func (s *Server) GetPeeringBackend() peering.Backend {
return s.peeringBackend
}
// RemoveFailedNode is used to remove a failed node from the cluster.
func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *acl.EnterpriseMeta) error {
var removeFn func(*serf.Serf, string) error
@ -1574,12 +1588,12 @@ func (s *Server) Stats() map[string]map[string]string {
// GetLANCoordinate returns the coordinate of the node in the LAN gossip
// pool.
//
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
//
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
//
// NOTE: servers do not emit coordinates for partitioned gossip pools they
// are ancillary members of.

View File

@ -159,3 +159,18 @@ func (s *Server) addEnterpriseStats(stats map[string]map[string]string) {
func getSerfMemberEnterpriseMeta(member serf.Member) *acl.EnterpriseMeta {
return structs.NodeEnterpriseMetaInDefaultPartition()
}
func addSerfMetricsLabels(conf *serf.Config, wan bool, segment string, partition string, areaID string) {
conf.MetricLabels = []metrics.Label{}
networkMetric := metrics.Label{
Name: "network",
}
if wan {
networkMetric.Value = "wan"
} else {
networkMetric.Value = "lan"
}
conf.MetricLabels = append(conf.MetricLabels, networkMetric)
}
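
For context, a minimal sketch of how a label built this way ends up on a serf configuration; it assumes a serf version whose Config exposes the MetricLabels field, and inlines the label construction since addSerfMetricsLabels is package-private:

```go
package main

import (
	"fmt"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/serf/serf"
)

func main() {
	conf := serf.DefaultConfig()
	wan := true

	// Same shape as addSerfMetricsLabels: one "network" label whose value
	// reflects which gossip pool (LAN or WAN) this serf instance serves.
	network := metrics.Label{Name: "network", Value: "lan"}
	if wan {
		network.Value = "wan"
	}
	conf.MetricLabels = []metrics.Label{network}

	for _, l := range conf.MetricLabels {
		fmt.Printf("%s=%s\n", l.Name, l.Value) // network=wan
	}
}
```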

Some files were not shown because too many files have changed in this diff.