Merge branch 'main' into fix-kv_entries-metric

This commit is contained in:
Max Bowsher 2022-08-29 22:22:10 +01:00
commit 3aefc4123f
278 changed files with 13773 additions and 5413 deletions

.changelog/11742.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
api: Add filtering support to Catalog's List Services (v1/catalog/services)
```
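For context, the new `filter` query parameter accepts the same bexpr grammar used elsewhere in the API and is evaluated against `ServiceNode` fields (see the `Catalog.ListServices` change later in this diff). A minimal sketch with the Go API client, assuming a local agent at the default address; the service name in the expression is only an example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Talk to the local agent at the default address (127.0.0.1:8500).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Server-side filtering: the expression is evaluated against ServiceNode
	// fields; "redis" is just an example service name.
	services, _, err := client.Catalog().Services(&api.QueryOptions{
		Filter: `ServiceName == "redis"`,
	})
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags)
	}
}
```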

.changelog/13493.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
cli: Fix the Consul KV CLI so that the 'get' command's 'keys' and 'recurse' flags can be set together
```

.changelog/13613.txt Normal file

@ -0,0 +1,4 @@
```release-note:feature
connect: Adds a new `destination` field to the `service-defaults` config entry that allows routing egress traffic
through a terminating gateway in transparent proxy mode without modifying the catalog.
```
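Roughly what such an entry looks like when written through the Go API client. This is a sketch only: the hostname and port are placeholders, and the `DestinationConfig` field names assume the shape added to the `api` package for this feature:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A service-defaults entry whose Destination points at a host outside the
	// catalog; transparent-proxy traffic to it can then egress through a
	// terminating gateway. Hostname and port are placeholders.
	entry := &api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "external-api",
		Protocol: "tcp",
		Destination: &api.DestinationConfig{
			Addresses: []string{"api.example.com"},
			Port:      443,
		},
	}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```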

.changelog/13958.txt Normal file

@ -0,0 +1,4 @@
```release-note:bug
connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams.
connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams.
```

.changelog/14021.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
ui: Fixes an issue where client side validation errors were not showing in certain areas
```

.changelog/14034.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive.
```
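A minimal sketch of what case-insensitive matching of the `-sidecar-for` value amounts to; the helper and its signature are illustrative, not the actual CLI code:

```go
package sidecar

import (
	"strings"

	"github.com/hashicorp/consul/api"
)

// lookupSidecarFor is illustrative only: it shows the kind of case-insensitive
// comparison the fix implies, matching a -sidecar-for value against locally
// registered connect proxy registrations.
func lookupSidecarFor(sidecarFor string, services map[string]*api.AgentService) (proxyID string, ok bool) {
	for id, svc := range services {
		if svc.Kind == api.ServiceKindConnectProxy &&
			svc.Proxy != nil &&
			strings.EqualFold(svc.Proxy.DestinationServiceID, sidecarFor) {
			return id, true
		}
	}
	return "", false
}
```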

.changelog/14081.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
agent: Fixes an issue where an agent that failed to start due to bad addresses would not clean up any previously opened listeners
```

.changelog/14119.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed some spurious issues during peering establishment when a follower is dialed
```

.changelog/14149.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
agent: Fixed a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)]
```

.changelog/14161.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
metrics: add segment, partition, network area, and network (LAN or WAN) labels to serf and memberlist metrics
```

.changelog/14162.txt Normal file

@ -0,0 +1,5 @@
```release-note:improvement
config-entry: Validate that service-resolver `Failover`s and `Redirect`s only
specify `Partition` and `Namespace` on Consul Enterprise. This prevents scenarios
where OSS Consul would save service-resolvers that require Consul Enterprise.
```

.changelog/14178.txt Normal file

@ -0,0 +1,4 @@
```release-note:breaking-change
xds: Convert service mesh failover to use Envoy's aggregate clusters. This
changes the names of some [Envoy dynamic HTTP metrics](https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats#dynamic-http-statistics).
```

.changelog/14233.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
rpc: Adds max jitter to client deadlines to prevent i/o deadline errors on blocking queries
```
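To illustrate the idea (not the actual implementation): the client-side deadline gets a random slice of the query wait time added on top of the RPC hold timeout, so it cannot land exactly on the moment the server is expected to return. Names such as `jitterFraction` are illustrative:

```go
package rpcjitter

import (
	"math/rand"
	"time"
)

// deadlineWithJitter is a sketch only. For a blocking query that may wait up
// to queryWait, the client deadline is queryWait plus the RPC hold timeout
// plus a random jitter of up to queryWait/jitterFraction.
func deadlineWithJitter(now time.Time, queryWait, rpcHoldTimeout time.Duration, jitterFraction int64) time.Time {
	var jitter time.Duration
	if jitterFraction > 0 {
		if n := int64(queryWait) / jitterFraction; n > 0 {
			jitter = time.Duration(rand.Int63n(n))
		}
	}
	return now.Add(queryWait + jitter + rpcHoldTimeout)
}
```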

.changelog/14269.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections.
```

.changelog/14373.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
```
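For reference, in go-control-plane terms this amounts to something like the following when building the upstream cluster for a peered service; a sketch, with the helper name invented for illustration:

```go
package xdssketch

import (
	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// peeredOutlierDetection is an invented helper showing the effect of the
// change: peered upstreams get an outlier-detection config whose ejection
// ceiling is raised to 100%, so every endpoint of an unhealthy remote
// cluster can be ejected.
func peeredOutlierDetection() *clusterv3.OutlierDetection {
	return &clusterv3.OutlierDetection{
		MaxEjectionPercent: wrapperspb.UInt32(100),
	}
}
```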

.changelog/14378.txt Normal file

@ -0,0 +1,5 @@
```release-note:bug
api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
`QueryFailoverOptions` and marks it as deprecated.
```
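The compatibility shim is a plain Go type alias; roughly (field set abridged, not the exact `api` package source):

```go
package api

// QueryFailoverOptions is the new name for prepared-query failover settings.
// Field set abridged for illustration.
type QueryFailoverOptions struct {
	// NearestN is the number of nearest datacenters to try on failover.
	NearestN int
	// Datacenters is a fixed, ordered list of datacenters to try.
	Datacenters []string
}

// QueryDatacenterOptions keeps code written against the old name compiling.
//
// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions
```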


@ -28,6 +28,10 @@ references:
- "1.21.4"
- "1.22.2"
- "1.23.0"
nomad-versions: &supported_nomad_versions
- &default_nomad_version "1.3.3"
- "1.2.10"
- "1.1.16"
images:
# When updating the Go version, remember to also update the versions in the
# workflows section for go-test-lib jobs.
@ -105,15 +109,18 @@ commands:
type: env_var_name
default: ROLE_ARN
steps:
# Only run the assume-role command for the main repo. The AWS credentials aren't available for forks.
- run: |
export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}"
export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}"
export ROLE_ARN="${<< parameters.role-arn >>}"
# assume role has duration of 15 min (the minimum allowed)
CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')"
echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV
echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV
echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV
if [[ "${CIRCLE_BRANCH%%/*}/" != "pull/" ]]; then
export AWS_ACCESS_KEY_ID="${<< parameters.access-key >>}"
export AWS_SECRET_ACCESS_KEY="${<< parameters.secret-key >>}"
export ROLE_ARN="${<< parameters.role-arn >>}"
# assume role has duration of 15 min (the minimum allowed)
CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')"
echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV
echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV
echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV
fi
run-go-test-full:
parameters:
@ -560,17 +567,20 @@ jobs:
- run: make ci.dev-docker
- run: *notify-slack-failure
# Nomad 0.8 builds on go1.10
# Run integration tests on nomad/v0.8.7
nomad-integration-0_8:
nomad-integration-test: &NOMAD_TESTS
docker:
- image: docker.mirror.hashicorp.services/cimg/go:1.10
- image: docker.mirror.hashicorp.services/cimg/go:1.19
parameters:
nomad-version:
type: enum
enum: *supported_nomad_versions
default: *default_nomad_version
environment:
<<: *ENVIRONMENT
NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /home/circleci/go/src/github.com/hashicorp/nomad
NOMAD_VERSION: v0.8.7
NOMAD_VERSION: << parameters.nomad-version >>
steps: &NOMAD_INTEGRATION_TEST_STEPS
- run: git clone https://github.com/hashicorp/nomad.git --branch ${NOMAD_VERSION} ${NOMAD_WORKING_DIR}
- run: git clone https://github.com/hashicorp/nomad.git --branch v${NOMAD_VERSION} ${NOMAD_WORKING_DIR}
# get consul binary
- attach_workspace:
@ -601,16 +611,6 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
# run integration tests on nomad/main
nomad-integration-main:
docker:
- image: docker.mirror.hashicorp.services/cimg/go:1.18
environment:
<<: *ENVIRONMENT
NOMAD_WORKING_DIR: /home/circleci/go/src/github.com/hashicorp/nomad
NOMAD_VERSION: main
steps: *NOMAD_INTEGRATION_TEST_STEPS
# build frontend yarn cache
frontend-cache:
docker:
@ -1117,12 +1117,12 @@ workflows:
- dev-upload-docker:
<<: *dev-upload
context: consul-ci
- nomad-integration-main:
requires:
- dev-build
- nomad-integration-0_8:
- nomad-integration-test:
requires:
- dev-build
matrix:
parameters:
nomad-version: *supported_nomad_versions
- envoy-integration-test:
requires:
- dev-build


@ -6,6 +6,7 @@ on:
branches:
# Push events on the main branch
- main
- release/**
env:
PKG_NAME: consul
@ -409,8 +410,8 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["i386", "x86_64", "armv7hl", "aarch64"]
# fail-fast: true
# TODO(eculver): re-enable when there is a smaller verification container available
arch: ["i386", "x86_64"] #, "armv7hl", "aarch64"]
env:
version: ${{ needs.get-product-version.outputs.product-version }}


@ -11,10 +11,7 @@ project "consul" {
repository = "consul"
release_branches = [
"main",
"release/1.9.x",
"release/1.10.x",
"release/1.11.x",
"release/1.12.x",
"release/**",
]
}
}
@ -265,3 +262,16 @@ event "promote-production-packaging" {
on = "always"
}
}
event "post-publish-website" {
depends = ["promote-production-packaging"]
action "post-publish-website" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "post-publish-website"
}
notification {
on = "always"
}
}


@ -1,3 +1,91 @@
## 1.13.1 (August 11, 2022)
BUG FIXES:
* agent: Fixed a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)] [[GH-14149](https://github.com/hashicorp/consul/issues/14149)]
* connect: Fixed some spurious issues during peering establishment when a follower is dialed [[GH-14119](https://github.com/hashicorp/consul/issues/14119)]
## 1.12.4 (August 11, 2022)
BUG FIXES:
* cli: when `acl token read` is used with the `-self` and `-expanded` flags, return an error instead of panicking [[GH-13787](https://github.com/hashicorp/consul/issues/13787)]
* connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. [[GH-13847](https://github.com/hashicorp/consul/issues/13847)]
* connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams.
connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams. [[GH-13958](https://github.com/hashicorp/consul/issues/13958)]
* ui: Fixes an issue where client side validation errors were not showing in certain areas [[GH-14021](https://github.com/hashicorp/consul/issues/14021)]
## 1.11.8 (August 11, 2022)
BUG FIXES:
* connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. [[GH-13847](https://github.com/hashicorp/consul/issues/13847)]
* connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams.
connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams. [[GH-13958](https://github.com/hashicorp/consul/issues/13958)]
## 1.13.0 (August 9, 2022)
BREAKING CHANGES:
* config-entry: Exporting a specific service name across all namespaces is invalid.
* connect: contains an upgrade compatibility issue when restoring snapshots containing service mesh proxy registrations from pre-1.13 versions of Consul [[GH-14107](https://github.com/hashicorp/consul/issues/14107)]. Fixed in 1.13.1 [[GH-14149](https://github.com/hashicorp/consul/issues/14149)]. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#all-service-mesh-deployments) for more information.
* connect: if using auto-encrypt or auto-config, TLS is required for gRPC communication between Envoy and Consul as of 1.13.0; this TLS for gRPC requirement will be removed in a future 1.13 patch release. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more information.
* connect: if a pre-1.13 Consul agent's HTTPS port was not enabled, upgrading to 1.13 may turn on TLS for gRPC communication between Envoy and Consul, depending on the agent's TLS configuration. Refer to [1.13 upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#grpc-tls) for more information.
* connect: Removes support for Envoy 1.19 [[GH-13807](https://github.com/hashicorp/consul/issues/13807)]
* telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Before upgrading you should remove this flag from your config if the flag is being used. [[GH-13532](https://github.com/hashicorp/consul/issues/13532)]
FEATURES:
* **Cluster Peering (Beta)** This version adds a new model to federate Consul clusters for both service mesh and traditional service discovery. Cluster peering allows for service interconnectivity with looser coupling than the existing WAN federation. For more information refer to the [cluster peering](https://www.consul.io/docs/connect/cluster-peering) documentation.
* **Transparent proxying through terminating gateways** This version adds egress traffic control to destinations outside of Consul's catalog, such as APIs on the public internet. Transparent proxies can dial [destinations defined in service-defaults](https://www.consul.io/docs/connect/config-entries/service-defaults#destination) and have the traffic routed through terminating gateways. For more information refer to the [terminating gateway](https://www.consul.io/docs/connect/gateways/terminating-gateway#terminating-gateway-configuration) documentation.
* acl: It is now possible to login and logout using the gRPC API [[GH-12935](https://github.com/hashicorp/consul/issues/12935)]
* agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands
to report this. Agent also reports build date in log on startup. [[GH-13357](https://github.com/hashicorp/consul/issues/13357)]
* ca: Leaf certificates can now be obtained via the gRPC API: `Sign` [[GH-12787](https://github.com/hashicorp/consul/issues/12787)]
* checks: add UDP health checks. [[GH-12722](https://github.com/hashicorp/consul/issues/12722)]
* cli: Add a new `-filename` flag to `consul config delete` to delete a config entry defined in a valid config file, e.g., `consul config delete -filename intention-allow.hcl` [[GH-13677](https://github.com/hashicorp/consul/issues/13677)]
* connect: Adds a new `destination` field to the `service-defaults` config entry that allows routing egress traffic
through a terminating gateway in transparent proxy mode without modifying the catalog. [[GH-13613](https://github.com/hashicorp/consul/issues/13613)]
* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-12825](https://github.com/hashicorp/consul/issues/12825)]
* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-1717](https://github.com/hashicorp/consul/issues/1717)]
* grpc: New gRPC service and endpoint to return the list of supported consul dataplane features [[GH-12695](https://github.com/hashicorp/consul/issues/12695)]
* server: broadcast the public grpc port using lan serf and update the consul service in the catalog with the same data [[GH-13687](https://github.com/hashicorp/consul/issues/13687)]
* streaming: Added topic that can be used to consume updates about the list of services in a datacenter [[GH-13722](https://github.com/hashicorp/consul/issues/13722)]
* streaming: Added topics for `ingress-gateway`, `mesh`, `service-intentions` and `service-resolver` config entry events. [[GH-13658](https://github.com/hashicorp/consul/issues/13658)]
IMPROVEMENTS:
* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13450](https://github.com/hashicorp/consul/issues/13450)]
* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-2046](https://github.com/hashicorp/consul/issues/2046)]
* api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13001](https://github.com/hashicorp/consul/issues/13001)]
* api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway [[GH-12914](https://github.com/hashicorp/consul/issues/12914)]
* catalog: Add per-node indexes to reduce watchset firing for unrelated nodes and services. [[GH-12399](https://github.com/hashicorp/consul/issues/12399)]
* connect: add validation to ensure connect native services have a port or socketpath specified on catalog registration.
This was the only missing piece to ensure all mesh services are validated for a port (or socketpath) specification on catalog registration. [[GH-12881](https://github.com/hashicorp/consul/issues/12881)]
* ui: Add new CopyableCode component and use it in certain pre-existing areas [[GH-13686](https://github.com/hashicorp/consul/issues/13686)]
* acl: Clarify node/service identities must be lowercase [[GH-12807](https://github.com/hashicorp/consul/issues/12807)]
* command: Add support for enabling TLS in the Envoy Prometheus endpoint via the `consul connect envoy` command.
Adds the `-prometheus-ca-file`, `-prometheus-ca-path`, `-prometheus-cert-file` and `-prometheus-key-file` flags. [[GH-13481](https://github.com/hashicorp/consul/issues/13481)]
* connect: Add Envoy 1.23.0 to support matrix [[GH-13807](https://github.com/hashicorp/consul/issues/13807)]
* connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance. [[GH-13143](https://github.com/hashicorp/consul/issues/13143)]
* grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. [[GH-12819](https://github.com/hashicorp/consul/issues/12819)]
* telemetry: Added `consul.raft.thread.main.saturation` and `consul.raft.thread.fsm.saturation` metrics to measure approximate saturation of the Raft goroutines [[GH-12865](https://github.com/hashicorp/consul/issues/12865)]
* ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities [[GH-10996](https://github.com/hashicorp/consul/issues/10996)]
* ui: upgrade ember-composable-helpers to v5.x [[GH-13394](https://github.com/hashicorp/consul/issues/13394)]
BUG FIXES:
* acl: Fixed a bug where the ACL down policy wasn't being applied on remote errors from the primary datacenter. [[GH-12885](https://github.com/hashicorp/consul/issues/12885)]
* cli: when `acl token read` is used with the `-self` and `-expanded` flags, return an error instead of panicking [[GH-13787](https://github.com/hashicorp/consul/issues/13787)]
* connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway. [[GH-13847](https://github.com/hashicorp/consul/issues/13847)]
* connect: Ingress gateways with a wildcard service entry should no longer pick up non-connect services as upstreams.
connect: Terminating gateways with a wildcard service entry should no longer pick up connect services as upstreams. [[GH-13958](https://github.com/hashicorp/consul/issues/13958)]
* proxycfg: Fixed a minor bug that would cause configuring a terminating gateway to watch too many service resolvers and waste resources doing filtering. [[GH-13012](https://github.com/hashicorp/consul/issues/13012)]
* raft: upgrade to v1.3.8, which fixes a bug where a non-cluster member could still participate in an election. [[GH-12844](https://github.com/hashicorp/consul/issues/12844)]
* serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed [[GH-13062](https://github.com/hashicorp/consul/issues/13062)]
* ui: Fixes an issue where client side validation errors were not showing in certain areas [[GH-14021](https://github.com/hashicorp/consul/issues/14021)]
## 1.12.3 (July 13, 2022)
IMPROVEMENTS:
@ -36,61 +124,6 @@ BUG FIXES:
* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13264](https://github.com/hashicorp/consul/issues/13264)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
## 1.13.0-alpha2 (June 21, 2022)
IMPROVEMENTS:
* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13450](https://github.com/hashicorp/consul/issues/13450)]
* connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5) [[GH-13431](https://github.com/hashicorp/consul/issues/13431)]
BUG FIXES:
* ui: Fix incorrect text on certain page empty states [[GH-13409](https://github.com/hashicorp/consul/issues/13409)]
## 1.13.0-alpha1 (June 15, 2022)
BREAKING CHANGES:
* config-entry: Exporting a specific service name across all namespaces is invalid.
FEATURES:
* acl: It is now possible to login and logout using the gRPC API [[GH-12935](https://github.com/hashicorp/consul/issues/12935)]
* agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands
to report this. Agent also reports build date in log on startup. [[GH-13357](https://github.com/hashicorp/consul/issues/13357)]
* ca: Leaf certificates can now be obtained via the gRPC API: `Sign` [[GH-12787](https://github.com/hashicorp/consul/issues/12787)]
* checks: add UDP health checks. [[GH-12722](https://github.com/hashicorp/consul/issues/12722)]
* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-12825](https://github.com/hashicorp/consul/issues/12825)]
* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-1717](https://github.com/hashicorp/consul/issues/1717)]
* grpc: New gRPC service and endpoint to return the list of supported consul dataplane features [[GH-12695](https://github.com/hashicorp/consul/issues/12695)]
IMPROVEMENTS:
* api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13001](https://github.com/hashicorp/consul/issues/13001)]
* api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway [[GH-12914](https://github.com/hashicorp/consul/issues/12914)]
* connect: add validation to ensure connect native services have a port or socketpath specified on catalog registration.
This was the only missing piece to ensure all mesh services are validated for a port (or socketpath) specification on catalog registration. [[GH-12881](https://github.com/hashicorp/consul/issues/12881)]
* Support Vault namespaces in Connect CA by adding RootPKINamespace and
IntermediatePKINamespace fields to the config. [[GH-12904](https://github.com/hashicorp/consul/issues/12904)]
* acl: Clarify node/service identities must be lowercase [[GH-12807](https://github.com/hashicorp/consul/issues/12807)]
* connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance. [[GH-13143](https://github.com/hashicorp/consul/issues/13143)]
* dns: Added support for specifying admin partition in node lookups. [[GH-13421](https://github.com/hashicorp/consul/issues/13421)]
* grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. [[GH-12819](https://github.com/hashicorp/consul/issues/12819)]
* telemetry: Added `consul.raft.thread.main.saturation` and `consul.raft.thread.fsm.saturation` metrics to measure approximate saturation of the Raft goroutines [[GH-12865](https://github.com/hashicorp/consul/issues/12865)]
* telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not. [[GH-13304](https://github.com/hashicorp/consul/issues/13304)]
* ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities [[GH-10996](https://github.com/hashicorp/consul/issues/10996)]
* ui: upgrade ember-composable-helpers to v5.x [[GH-13394](https://github.com/hashicorp/consul/issues/13394)]
BUG FIXES:
* acl: Fixed a bug where the ACL down policy wasn't being applied on remote errors from the primary datacenter. [[GH-12885](https://github.com/hashicorp/consul/issues/12885)]
* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13256](https://github.com/hashicorp/consul/issues/13256)]
* deps: Update go-grpc/grpc, resolving connection memory leak [[GH-13051](https://github.com/hashicorp/consul/issues/13051)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
* proxycfg: Fixed a minor bug that would cause configuring a terminating gateway to watch too many service resolvers and waste resources doing filtering. [[GH-13012](https://github.com/hashicorp/consul/issues/13012)]
* raft: upgrade to v1.3.8, which fixes a bug where a non-cluster member could still participate in an election. [[GH-12844](https://github.com/hashicorp/consul/issues/12844)]
* serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed [[GH-13062](https://github.com/hashicorp/consul/issues/13062)]
## 1.12.2 (June 3, 2022)
BUG FIXES:
@ -914,6 +947,24 @@ NOTES:
* legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive
## 1.9.17 (April 13, 2022)
SECURITY:
* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)]
* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)]
DEPRECATIONS:
* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-12767](https://github.com/hashicorp/consul/issues/12767)]
BUG FIXES:
* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)]
* memberlist: fixes a bug which prevented members from joining a cluster with
large amounts of churn [[GH-253](https://github.com/hashicorp/memberlist/issues/253)] [[GH-12046](https://github.com/hashicorp/consul/issues/12046)]
* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)]
## 1.9.16 (February 28, 2022)
FEATURES:


@ -22,10 +22,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$VERSION \
org.opencontainers.image.version=${VERSION} \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
version=${VERSION}
# This is the location of the releases.
ENV HASHICORP_RELEASES=https://releases.hashicorp.com
@ -110,13 +111,13 @@ CMD ["agent", "-dev", "-client", "0.0.0.0"]
# Remember, this image cannot be built locally.
FROM docker.mirror.hashicorp.services/alpine:3.15 as default
ARG VERSION
ARG PRODUCT_VERSION
ARG BIN_NAME
# PRODUCT_NAME and PRODUCT_VERSION are the name of the software on releases.hashicorp.com
# and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3.
ENV BIN_NAME=$BIN_NAME
ENV VERSION=$VERSION
ENV PRODUCT_VERSION=$PRODUCT_VERSION
ARG PRODUCT_REVISION
ARG PRODUCT_NAME=$BIN_NAME
@ -128,10 +129,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$VERSION \
org.opencontainers.image.version=${PRODUCT_VERSION} \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
version=${PRODUCT_VERSION}
# Set up certificates and base tools.
# libc6-compat is needed to symlink the shared libraries for ARM builds
@ -217,10 +219,11 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$VERSION \
org.opencontainers.image.version=${PRODUCT_VERSION} \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \
version=${PRODUCT_VERSION}
# Copy license for Red Hat certification.
COPY LICENSE /licenses/mozilla.txt
@ -284,4 +287,4 @@ USER 100
# By default you'll get an insecure single-node development server that stores
# everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself.
# Don't use this configuration for production.
CMD ["agent", "-dev", "-client", "0.0.0.0"]
CMD ["agent", "-dev", "-client", "0.0.0.0"]


@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
GOTAGS ?=
GOPATH=$(shell go env GOPATH)
GOARCH?=$(shell go env GOARCH)
MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)
export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@ -152,7 +153,28 @@ dev-docker: linux
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
# 'consul:local' tag is needed to run the integration tests
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
@docker buildx use default && docker buildx build -t 'consul:local' \
--platform linux/$(GOARCH) \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--load \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
check-remote-dev-image-env:
ifndef REMOTE_DEV_IMAGE
$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
endif
remote-docker: check-remote-dev-image-env
$(MAKE) GOARCH=amd64 linux
$(MAKE) GOARCH=arm64 linux
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
--platform linux/amd64,linux/arm64 \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--push \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
endif
# linux builds a linux binary independent of the source platform
# linux builds a linux binary compatible with the source platform
linux:
@mkdir -p ./pkg/bin/linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
@mkdir -p ./pkg/bin/linux_$(GOARCH)
CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
# dist builds binaries for all platforms and packages them for distribution
dist:


@ -863,8 +863,18 @@ func (a *Agent) listenAndServeDNS() error {
return merr.ErrorOrNil()
}
// startListeners will return a net.Listener for every address unless an
// error is encountered, in which case it will close all previously opened
// listeners and return the error.
func (a *Agent) startListeners(addrs []net.Addr) ([]net.Listener, error) {
var ln []net.Listener
var lns []net.Listener
closeAll := func() {
for _, l := range lns {
l.Close()
}
}
for _, addr := range addrs {
var l net.Listener
var err error
@ -873,22 +883,25 @@ func (a *Agent) startListeners(addrs []net.Addr) ([]net.Listener, error) {
case *net.UnixAddr:
l, err = a.listenSocket(x.Name)
if err != nil {
closeAll()
return nil, err
}
case *net.TCPAddr:
l, err = net.Listen("tcp", x.String())
if err != nil {
closeAll()
return nil, err
}
l = &tcpKeepAliveListener{l.(*net.TCPListener)}
default:
closeAll()
return nil, fmt.Errorf("unsupported address type %T", addr)
}
ln = append(ln, l)
lns = append(lns, l)
}
return ln, nil
return lns, nil
}
// listenHTTP binds listeners to the provided addresses and also returns


@ -1123,8 +1123,8 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid Service Meta: %v", err)}
}
// Run validation. This is the same validation that would happen on
// the catalog endpoint so it helps ensure the sync will work properly.
// Run validation. This same validation would happen on the catalog endpoint,
// so it helps ensure the sync will work properly.
if err := ns.Validate(); err != nil {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Validation failed: %v", err.Error())}
}
@ -1164,7 +1164,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid SidecarService: %s", err)}
}
if sidecar != nil {
if err := sidecar.Validate(); err != nil {
if err := sidecar.ValidateForAgent(); err != nil {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Failed Validation: %v", err.Error())}
}
// Make sure we are allowed to register the sidecar using the token


@ -6799,7 +6799,7 @@ func TestAgentConnectCALeafCert_good(t *testing.T) {
ca2 := connect.TestCAConfigSet(t, a, nil)
// Issue a blocking query to ensure that the cert gets updated appropriately
{
t.Run("test blocking queries update leaf cert", func(t *testing.T) {
resp := httptest.NewRecorder()
req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+index, nil)
a.srv.h.ServeHTTP(resp, req)
@ -6815,7 +6815,7 @@ func TestAgentConnectCALeafCert_good(t *testing.T) {
// Should not be a cache hit! The data was updated in response to the blocking
// query being made.
require.Equal(t, "MISS", resp.Header().Get("X-Cache"))
}
})
t.Run("test non-blocking queries update leaf cert", func(t *testing.T) {
resp := httptest.NewRecorder()
@ -6834,33 +6834,26 @@ func TestAgentConnectCALeafCert_good(t *testing.T) {
// Set a new CA
ca3 := connect.TestCAConfigSet(t, a, nil)
resp := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
require.NoError(t, err)
obj, err = a.srv.AgentConnectCALeafCert(resp, req)
require.NoError(t, err)
issued2 := obj.(*structs.IssuedCert)
require.NotEqual(t, issued.CertPEM, issued2.CertPEM)
require.NotEqual(t, issued.PrivateKeyPEM, issued2.PrivateKeyPEM)
// Verify that the cert is signed by the new CA
requireLeafValidUnderCA(t, issued2, ca3)
// Should not be a cache hit!
require.Equal(t, "MISS", resp.Header().Get("X-Cache"))
}
// Test caching for the leaf cert
{
for fetched := 0; fetched < 4; fetched++ {
// Fetch it again
retry.Run(t, func(r *retry.R) {
resp := httptest.NewRecorder()
obj2, err := a.srv.AgentConnectCALeafCert(resp, req)
require.NoError(t, err)
require.Equal(t, obj, obj2)
}
a.srv.h.ServeHTTP(resp, req)
// Should not be a cache hit!
require.Equal(r, "MISS", resp.Header().Get("X-Cache"))
dec := json.NewDecoder(resp.Body)
issued2 := &structs.IssuedCert{}
require.NoError(r, dec.Decode(issued2))
require.NotEqual(r, issued.CertPEM, issued2.CertPEM)
require.NotEqual(r, issued.PrivateKeyPEM, issued2.PrivateKeyPEM)
// Verify that the cert is signed by the new CA
requireLeafValidUnderCA(r, issued2, ca3)
})
}
})
}
@ -7405,7 +7398,7 @@ func waitForActiveCARoot(t *testing.T, srv *HTTPHandlers, expect *structs.CARoot
})
}
func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, ca *structs.CARoot) {
func requireLeafValidUnderCA(t require.TestingT, issued *structs.IssuedCert, ca *structs.CARoot) {
leaf, intermediates, err := connect.ParseLeafCerts(issued.CertPEM)
require.NoError(t, err)


@ -5857,6 +5857,73 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) {
}
func TestAgent_startListeners(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
ports := freeport.GetN(t, 3)
bd := BaseDeps{
Deps: consul.Deps{
Logger: hclog.NewInterceptLogger(nil),
Tokens: new(token.Store),
GRPCConnPool: &fakeGRPCConnPool{},
},
RuntimeConfig: &config.RuntimeConfig{
HTTPAddrs: []net.Addr{},
},
Cache: cache.New(cache.Options{}),
}
bd, err := initEnterpriseBaseDeps(bd, nil)
require.NoError(t, err)
agent, err := New(bd)
require.NoError(t, err)
// use up an address
used := net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[2]}
l, err := net.Listen("tcp", used.String())
require.NoError(t, err)
t.Cleanup(func() { l.Close() })
var lns []net.Listener
t.Cleanup(func() {
for _, ln := range lns {
ln.Close()
}
})
// first two addresses open listeners but third address should fail
lns, err = agent.startListeners([]net.Addr{
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[2]},
})
require.Contains(t, err.Error(), "address already in use")
// first two ports should be freed up
retry.Run(t, func(r *retry.R) {
lns, err = agent.startListeners([]net.Addr{
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
})
require.NoError(r, err)
require.Len(r, lns, 2)
})
// first two ports should be in use
retry.Run(t, func(r *retry.R) {
_, err = agent.startListeners([]net.Addr{
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
})
require.Contains(r, err.Error(), "address already in use")
})
}
func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool {
pool := x509.NewCertPool()
data, err := ioutil.ReadFile("../test/ca/root.cer")


@ -8,6 +8,8 @@ import (
"github.com/mitchellh/hashstructure"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/hashicorp/consul/agent/cache"
external "github.com/hashicorp/consul/agent/grpc-external"
@ -87,6 +89,13 @@ func (t *TrustBundles) Fetch(_ cache.FetchOptions, req cache.Request) (cache.Fet
// Fetch
reply, err := t.Client.TrustBundleListByService(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request)
if err != nil {
// Return an empty result if the error is due to peering being disabled.
// This allows mesh gateways to receive an update and confirm that the watch is set.
if e, ok := status.FromError(err); ok && e.Code() == codes.FailedPrecondition {
result.Index = 1
result.Value = &pbpeering.TrustBundleListByServiceResponse{Index: 1}
return result, nil
}
return result, err
}


@ -7,6 +7,8 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
@ -48,6 +50,29 @@ func TestTrustBundles(t *testing.T) {
}, result)
}
func TestTrustBundles_PeeringDisabled(t *testing.T) {
client := NewMockTrustBundleLister(t)
typ := &TrustBundles{Client: client}
var resp *pbpeering.TrustBundleListByServiceResponse
// Expect the proper call.
// This also returns the canned response above.
client.On("TrustBundleListByService", mock.Anything, mock.Anything).
Return(resp, grpcstatus.Error(codes.FailedPrecondition, "peering must be enabled to use this endpoint"))
// Fetch and assert against the result.
result, err := typ.Fetch(cache.FetchOptions{}, &TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{
ServiceName: "foo",
},
})
require.NoError(t, err)
require.NotNil(t, result)
require.EqualValues(t, 1, result.Index)
require.NotNil(t, result.Value)
}
func TestTrustBundles_badReqType(t *testing.T) {
client := pbpeering.NewPeeringServiceClient(nil)
typ := &TrustBundles{Client: client}


@ -2531,10 +2531,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza")
}
// TLS is only enabled on the gRPC listener if there's an HTTPS port configured
// for historic and backwards-compatibility reasons.
if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) {
b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)")
// And UseAutoCert right now only applies to external gRPC interface.
if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil {
return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza")
}
defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion)
@ -2591,6 +2590,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
mapCommon("https", t.HTTPS, &c.HTTPS)
mapCommon("grpc", t.GRPC, &c.GRPC)
c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false)
c.ServerName = rt.ServerName
c.NodeName = rt.NodeName


@ -611,7 +611,7 @@ type Connect struct {
MeshGatewayWANFederationEnabled *bool `mapstructure:"enable_mesh_gateway_wan_federation"`
EnableServerlessPlugin *bool `mapstructure:"enable_serverless_plugin"`
// TestCALeafRootChangeSpread controls how long after a CA roots change before new leaft certs will be generated.
// TestCALeafRootChangeSpread controls how long after a CA roots change before new leaf certs will be generated.
// This is only tuned in tests, generally set to 1ns to make tests deterministic with when to expect updated leaf
// certs by. This configuration is not exposed to users (not documented, and agent/config/default.go will override it)
TestCALeafRootChangeSpread *string `mapstructure:"test_ca_leaf_root_change_spread"`
@ -867,6 +867,7 @@ type TLSProtocolConfig struct {
VerifyIncoming *bool `mapstructure:"verify_incoming"`
VerifyOutgoing *bool `mapstructure:"verify_outgoing"`
VerifyServerHostname *bool `mapstructure:"verify_server_hostname"`
UseAutoCert *bool `mapstructure:"use_auto_cert"`
}
type TLS struct {


@ -104,9 +104,6 @@ func DefaultSource() Source {
kv_max_value_size = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + `
txn_max_req_len = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + `
}
peering = {
enabled = true
}
performance = {
leave_drain_time = "5s"
raft_multiplier = ` + strconv.Itoa(int(consul.DefaultRaftMultiplier)) + `


@ -5516,7 +5516,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
},
})
run(t, testCase{
desc: "tls.grpc without ports.https",
desc: "tls.grpc.use_auto_cert defaults to false",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
"tls": {
"grpc": {}
}
}
`},
hcl: []string{`
tls {
grpc {}
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert defaults to false (II)",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
"tls": {}
}
`},
hcl: []string{`
tls {
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert defaults to false (III)",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
}
`},
hcl: []string{`
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
run(t, testCase{
desc: "tls.grpc.use_auto_cert enabled when true",
args: []string{
`-data-dir=` + dataDir,
},
@ -5524,7 +5587,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
{
"tls": {
"grpc": {
"cert_file": "cert-1234"
"use_auto_cert": true
}
}
}
@ -5532,30 +5595,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
hcl: []string{`
tls {
grpc {
cert_file = "cert-1234"
use_auto_cert = true
}
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.CertFile = "cert-1234"
},
expectedWarnings: []string{
"tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)",
rt.TLS.GRPC.UseAutoCert = true
},
})
run(t, testCase{
desc: "peering.enabled defaults to true",
desc: "tls.grpc.use_auto_cert disabled when false",
args: []string{
`-data-dir=` + dataDir,
},
json: []string{`
{
"tls": {
"grpc": {
"use_auto_cert": false
}
}
}
`},
hcl: []string{`
tls {
grpc {
use_auto_cert = false
}
}
`},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.PeeringEnabled = true
rt.TLS.Domain = "consul."
rt.TLS.NodeName = "thehostname"
rt.TLS.GRPC.UseAutoCert = false
},
})
}
@ -6350,6 +6426,7 @@ func TestLoad_FullConfig(t *testing.T) {
TLSMinVersion: types.TLSv1_0,
CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA},
VerifyOutgoing: false,
UseAutoCert: true,
},
HTTPS: tlsutil.ProtocolConfig{
VerifyIncoming: true,


@ -374,7 +374,8 @@
"TLSMinVersion": "",
"VerifyIncoming": false,
"VerifyOutgoing": false,
"VerifyServerHostname": false
"VerifyServerHostname": false,
"UseAutoCert": false
},
"HTTPS": {
"CAFile": "",
@ -385,7 +386,8 @@
"TLSMinVersion": "",
"VerifyIncoming": false,
"VerifyOutgoing": false,
"VerifyServerHostname": false
"VerifyServerHostname": false,
"UseAutoCert": false
},
"InternalRPC": {
"CAFile": "",
@ -396,7 +398,8 @@
"TLSMinVersion": "",
"VerifyIncoming": false,
"VerifyOutgoing": false,
"VerifyServerHostname": false
"VerifyServerHostname": false,
"UseAutoCert": false
},
"NodeName": "",
"ServerName": ""
@ -466,4 +469,4 @@
"VersionMetadata": "",
"VersionPrerelease": "",
"Watches": []
}
}


@ -697,6 +697,7 @@ tls {
tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
tls_min_version = "TLSv1_0"
verify_incoming = true
use_auto_cert = true
}
}
tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"


@ -692,7 +692,8 @@
"key_file": "1y4prKjl",
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"tls_min_version": "TLSv1_0",
"verify_incoming": true
"verify_incoming": true,
"use_auto_cert": true
}
},
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",


@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) {
func TestTargetSNI(t *testing.T) {
// empty namespace, empty subset
require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain1))
require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
Datacenter: "foo",
}), testTrustDomain1))
// set namespace, empty subset
require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2,
TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
Namespace: "neighbor",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain2))
// empty namespace, set subset
require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1,
TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", "foo"), testTrustDomain1))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
ServiceSubset: "v2",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain1))
// set namespace, set subset
require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2,
TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2))
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: "api",
ServiceSubset: "canary",
Namespace: "neighbor",
Partition: "default",
Datacenter: "foo",
}), testTrustDomain2))
}


@ -565,6 +565,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
return err
}
filter, err := bexpr.CreateFilter(args.Filter, nil, []*structs.ServiceNode{})
if err != nil {
return err
}
// Set reply enterprise metadata after resolving and validating the token so
// that we can properly infer metadata from the token.
reply.EnterpriseMeta = args.EnterpriseMeta
@ -574,10 +579,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var err error
var serviceNodes structs.ServiceNodes
if len(args.NodeMetaFilters) > 0 {
reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
reply.Index, serviceNodes, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
} else {
reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
reply.Index, serviceNodes, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
}
if err != nil {
return err
@ -588,11 +594,43 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
return nil
}
raw, err := filter.Execute(serviceNodes)
if err != nil {
return err
}
reply.Services = servicesTagsByName(raw.(structs.ServiceNodes))
c.srv.filterACLWithAuthorizer(authz, reply)
return nil
})
}
func servicesTagsByName(services []*structs.ServiceNode) structs.Services {
unique := make(map[string]map[string]struct{})
for _, svc := range services {
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return results
}
// ServiceList is used to query the services in a DC.
// Returns services as a list of ServiceNames.
func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error {


@ -1523,6 +1523,45 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) {
}
}
func TestCatalog_ListServices_Filter(t *testing.T) {
t.Parallel()
_, s1 := testServer(t)
codec := rpcClient(t, s1)
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
// prep the cluster with some data we can use in our filters
registerTestCatalogEntries(t, codec)
// Run the tests against the test server
t.Run("ListServices", func(t *testing.T) {
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
args.Filter = "ServiceName == redis"
out := new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Contains(t, out.Services, "redis")
require.ElementsMatch(t, []string{"v1", "v2"}, out.Services["redis"])
args.Filter = "NodeMeta.os == NoSuchOS"
out = new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Len(t, out.Services, 0)
args.Filter = "NodeMeta.NoSuchMetadata == linux"
out = new(structs.IndexedServices)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
require.Len(t, out.Services, 0)
args.Filter = "InvalidField == linux"
out = new(structs.IndexedServices)
require.Error(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
})
}
func TestCatalog_ListServices_Blocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")


@ -62,6 +62,8 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
return nil, err
}
addSerfMetricsLabels(conf, false, c.config.Segment, c.config.AgentEnterpriseMeta().PartitionOrDefault(), "")
addEnterpriseSerfTags(conf.Tags, c.config.AgentEnterpriseMeta())
conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(c.logger)


@ -893,8 +893,8 @@ func TestClient_RPC_Timeout(t *testing.T) {
}
})
// waiter will sleep for 50ms
require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 50 * time.Millisecond}))
// waiter will sleep for 101ms which is 1ms more than the DefaultQueryTime
require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 101 * time.Millisecond}))
// Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms)
// so we expect the RPC call to timeout.
@ -903,7 +903,8 @@ func TestClient_RPC_Timeout(t *testing.T) {
require.Error(t, err)
require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
// Blocking requests have a longer timeout (100ms) so this should pass
// Blocking requests have a longer timeout (100ms) so this should pass since we
// add the maximum jitter which should be 16ms
out = struct{}{}
err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
QueryOptions: structs.QueryOptions{


@ -517,7 +517,6 @@ func DefaultConfig() *Config {
DefaultQueryTime: 300 * time.Second,
MaxQueryTime: 600 * time.Second,
PeeringEnabled: true,
PeeringTestAllowPeerRegistrations: false,
EnterpriseConfig: DefaultEnterpriseConfig(),
@ -585,6 +584,7 @@ func CloneSerfLANConfig(base *serf.Config) *serf.Config {
cfg.MemberlistConfig.ProbeTimeout = base.MemberlistConfig.ProbeTimeout
cfg.MemberlistConfig.SuspicionMult = base.MemberlistConfig.SuspicionMult
cfg.MemberlistConfig.RetransmitMult = base.MemberlistConfig.RetransmitMult
cfg.MemberlistConfig.MetricLabels = base.MemberlistConfig.MetricLabels
// agent/keyring.go
cfg.MemberlistConfig.Keyring = base.MemberlistConfig.Keyring
@ -594,6 +594,7 @@ func CloneSerfLANConfig(base *serf.Config) *serf.Config {
cfg.ReapInterval = base.ReapInterval
cfg.TombstoneTimeout = base.TombstoneTimeout
cfg.MemberlistConfig.SecretKey = base.MemberlistConfig.SecretKey
cfg.MetricLabels = base.MetricLabels
return cfg
}


@ -1804,8 +1804,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams_Blocking(t *testing.T) {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()


@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
return &resp, nil
}
newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default
@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
},
},
}
@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc1"),
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
33*time.Second,
),
},

View File

@ -8,6 +8,7 @@ import (
"github.com/mitchellh/hashstructure"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error {
if router == nil {
// If no router is configured, move on down the line to the next hop of
// the chain.
node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", ""))
node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{
Service: c.serviceName,
}))
if err != nil {
return err
}
@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error {
)
if dest.ServiceSubset == "" {
node, err = c.getSplitterOrResolverNode(
c.newTarget(svc, "", destNamespace, destPartition, ""),
)
c.newTarget(structs.DiscoveryTargetOpts{
Service: svc,
Namespace: destNamespace,
Partition: destPartition,
},
))
} else {
node, err = c.getResolverNode(
c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""),
c.newTarget(structs.DiscoveryTargetOpts{
Service: svc,
ServiceSubset: dest.ServiceSubset,
Namespace: destNamespace,
Partition: destPartition,
}),
false,
)
}
@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error {
// If we have a router, we'll add a catch-all route at the end to send
// unmatched traffic to the next hop in the chain.
defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), ""))
opts := structs.DiscoveryTargetOpts{
Service: router.Name,
Namespace: router.NamespaceOrDefault(),
Partition: router.PartitionOrDefault(),
}
defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts))
if err != nil {
return err
}
@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.S
}
}
func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
if service == "" {
func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
if opts.Service == "" {
panic("newTarget called with empty service which makes no sense")
}
t := structs.NewDiscoveryTarget(
service,
serviceSubset,
defaultIfEmpty(namespace, c.evaluateInNamespace),
defaultIfEmpty(partition, c.evaluateInPartition),
defaultIfEmpty(datacenter, c.evaluateInDatacenter),
)
if opts.Peer == "" {
opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter)
opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace)
opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition)
} else {
// Don't allow Peer and Datacenter.
opts.Datacenter = ""
// Peer and Partition cannot both be set.
opts.Partition = acl.PartitionOrDefault("")
// Default to "default" rather than c.evaluateInNamespace.
opts.Namespace = acl.PartitionOrDefault(opts.Namespace)
}
// Set default connect SNI. This will be overridden later if the service
// has an explicit SNI value configured in service-defaults.
t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
t := structs.NewDiscoveryTarget(opts)
// Use the same representation for the name. This will NOT be overridden
// later.
t.Name = t.SNI
// We don't have the peer's trust domain yet so we can't construct the SNI.
if opts.Peer == "" {
// Set default connect SNI. This will be overridden later if the service
// has an explicit SNI value configured in service-defaults.
t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
// Use the same representation for the name. This will NOT be overridden
// later.
t.Name = t.SNI
}
prev, ok := c.loadedTargets[t.ID]
if ok {
@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datac
return t
}
func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget {
var (
service2 = t.Service
serviceSubset2 = t.ServiceSubset
partition2 = t.Partition
namespace2 = t.Namespace
datacenter2 = t.Datacenter
)
func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
mergedOpts := t.ToDiscoveryTargetOpts()
if service != "" && service != service2 {
service2 = service
if opts.Service != "" && opts.Service != mergedOpts.Service {
mergedOpts.Service = opts.Service
// Reset the chosen subset if we reference a service other than our own.
serviceSubset2 = ""
mergedOpts.ServiceSubset = ""
}
if serviceSubset != "" {
serviceSubset2 = serviceSubset
if opts.ServiceSubset != "" {
mergedOpts.ServiceSubset = opts.ServiceSubset
}
if partition != "" {
partition2 = partition
if opts.Partition != "" {
mergedOpts.Partition = opts.Partition
}
if namespace != "" {
namespace2 = namespace
// Only use explicit Namespace with Peer
if opts.Namespace != "" || opts.Peer != "" {
mergedOpts.Namespace = opts.Namespace
}
if datacenter != "" {
datacenter2 = datacenter
if opts.Datacenter != "" {
mergedOpts.Datacenter = opts.Datacenter
}
mergedOpts.Peer = opts.Peer
return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2)
return c.newTarget(mergedOpts)
}
func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) {
@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGra
// fall through to group-resolver
}
node, err := c.getResolverNode(
c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""),
false,
)
opts := structs.DiscoveryTargetOpts{
Service: splitID.ID,
ServiceSubset: split.ServiceSubset,
Namespace: splitID.NamespaceOrDefault(),
Partition: splitID.PartitionOrDefault(),
}
node, err := c.getResolverNode(c.newTarget(opts), false)
if err != nil {
return nil, err
}
@ -881,11 +908,7 @@ RESOLVE_AGAIN:
redirectedTarget := c.rewriteTarget(
target,
redirect.Service,
redirect.ServiceSubset,
redirect.Partition,
redirect.Namespace,
redirect.Datacenter,
redirect.ToDiscoveryTargetOpts(),
)
if redirectedTarget.ID != target.ID {
target = redirectedTarget
@ -895,14 +918,9 @@ RESOLVE_AGAIN:
// Handle default subset.
if target.ServiceSubset == "" && resolver.DefaultSubset != "" {
target = c.rewriteTarget(
target,
"",
resolver.DefaultSubset,
"",
"",
"",
)
target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{
ServiceSubset: resolver.DefaultSubset,
})
goto RESOLVE_AGAIN
}
@ -1027,56 +1045,54 @@ RESOLVE_AGAIN:
failover, ok = f["*"]
}
if ok {
// Determine which failover definitions apply.
var failoverTargets []*structs.DiscoveryTarget
if len(failover.Datacenters) > 0 {
for _, dc := range failover.Datacenters {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(
target,
failover.Service,
failover.ServiceSubset,
target.Partition,
failover.Namespace,
dc,
)
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
} else {
if !ok {
return node, nil
}
// Determine which failover definitions apply.
var failoverTargets []*structs.DiscoveryTarget
if len(failover.Datacenters) > 0 {
opts := failover.ToDiscoveryTargetOpts()
for _, dc := range failover.Datacenters {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(
target,
failover.Service,
failover.ServiceSubset,
target.Partition,
failover.Namespace,
"",
)
opts.Datacenter = dc
failoverTarget := c.rewriteTarget(target, opts)
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
// If we filtered everything out then no point in having a failover.
if len(failoverTargets) > 0 {
df := &structs.DiscoveryFailover{}
node.Resolver.Failover = df
// Take care of doing any redirects or configuration loading
// related to targets by cheating a bit and recursing into
// ourselves.
for _, target := range failoverTargets {
failoverResolveNode, err := c.getResolverNode(target, true)
if err != nil {
return nil, err
}
failoverTarget := failoverResolveNode.Resolver.Target
df.Targets = append(df.Targets, failoverTarget)
} else if len(failover.Targets) > 0 {
for _, t := range failover.Targets {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts())
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
} else {
// Rewrite the target as per the failover policy.
failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts())
if failoverTarget.ID != target.ID { // don't failover to yourself
failoverTargets = append(failoverTargets, failoverTarget)
}
}
// If we filtered everything out then no point in having a failover.
if len(failoverTargets) > 0 {
df := &structs.DiscoveryFailover{}
node.Resolver.Failover = df
// Take care of doing any redirects or configuration loading
// related to targets by cheating a bit and recursing into
// ourselves.
for _, target := range failoverTargets {
failoverResolveNode, err := c.getResolverNode(target, true)
if err != nil {
return nil, err
}
failoverTarget := failoverResolveNode.Resolver.Target
df.Targets = append(df.Targets, failoverTarget)
}
}
}
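
The hunks above replace newTarget's five positional string arguments with a single structs.DiscoveryTargetOpts value. A generic sketch of that options-struct refactor follows, using made-up names rather than Consul's types, to show why call sites become self-describing and defaulting lives in one place.

```go
package main

import "fmt"

// TargetOpts plays the role of structs.DiscoveryTargetOpts: every field is
// optional and the constructor fills in defaults, so callers only name the
// fields they care about.
type TargetOpts struct {
	Service       string
	ServiceSubset string
	Namespace     string
	Partition     string
	Datacenter    string
	Peer          string
}

// NewTarget replaces an old positional signature like
// newTarget(service, subset, namespace, partition, datacenter).
func NewTarget(opts TargetOpts) TargetOpts {
	if opts.Namespace == "" {
		opts.Namespace = "default"
	}
	if opts.Partition == "" {
		opts.Partition = "default"
	}
	// Peered targets are addressed by peer, not datacenter.
	if opts.Peer == "" && opts.Datacenter == "" {
		opts.Datacenter = "dc1"
	}
	return opts
}

func main() {
	// Compare with the old style: newTarget("web", "", "default", "default", "dc1").
	fmt.Printf("%+v\n", NewTarget(TargetOpts{Service: "web"}))
	fmt.Printf("%+v\n", NewTarget(TargetOpts{Service: "other", Peer: "cluster-01"}))
}
```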

View File

@ -39,6 +39,7 @@ func TestCompile(t *testing.T) {
"service redirect": testcase_ServiceRedirect(),
"service and subset redirect": testcase_ServiceAndSubsetRedirect(),
"datacenter redirect": testcase_DatacenterRedirect(),
"redirect to cluster peer": testcase_PeerRedirect(),
"datacenter redirect with mesh gateways": testcase_DatacenterRedirect_WithMeshGateways(),
"service failover": testcase_ServiceFailover(),
"service failover through redirect": testcase_ServiceFailoverThroughRedirect(),
@ -46,6 +47,7 @@ func TestCompile(t *testing.T) {
"service and subset failover": testcase_ServiceAndSubsetFailover(),
"datacenter failover": testcase_DatacenterFailover(),
"datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(),
"target failover": testcase_Failover_Targets(),
"noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(),
"resolver with default subset": testcase_Resolve_WithDefaultSubset(),
"default resolver with external sni": testcase_DefaultResolver_ExternalSNI(),
@ -182,7 +184,7 @@ func testcase_JustRouterWithDefaults() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -244,7 +246,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -294,7 +296,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc1", nil),
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
33*time.Second,
),
},
@ -361,7 +363,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -426,7 +428,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc1",
}, nil),
},
}
@ -498,7 +503,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc1", nil),
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
33*time.Second,
),
},
@ -584,8 +589,11 @@ func testcase_RouteBypassesSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"bypass.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
ServiceSubset: "bypass",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == bypass",
}
@ -638,7 +646,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -694,7 +702,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc1", nil),
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
33*time.Second,
),
},
@ -776,12 +784,19 @@ func testcase_SubsetSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
}),
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v1",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 1",
}
@ -855,8 +870,8 @@ func testcase_ServiceSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
},
}
@ -935,7 +950,10 @@ func testcase_SplitBypassesSplit() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "next",
ServiceSubset: "bypassed",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == bypass",
}
@ -973,7 +991,7 @@ func testcase_ServiceRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
},
}
@ -1019,7 +1037,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
@ -1055,7 +1076,51 @@ func testcase_DatacenterRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil),
"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc9",
}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
}
func testcase_PeerRedirect() compileTestCase {
entries := newEntries()
entries.AddResolvers(
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
Peer: "cluster-01",
},
},
)
expect := &structs.CompiledDiscoveryChain{
Protocol: "tcp",
StartNode: "resolver:other.default.default.external.cluster-01",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:other.default.default.external.cluster-01": {
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "other.default.default.external.cluster-01",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "other.default.default.external.cluster-01",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
Peer: "cluster-01",
}, func(t *structs.DiscoveryTarget) {
t.SNI = ""
t.Name = ""
t.Datacenter = ""
}),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1095,7 +1160,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) {
"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc9",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
@ -1134,8 +1202,8 @@ func testcase_ServiceFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1177,8 +1245,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1220,8 +1288,8 @@ func testcase_Resolver_CircularFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1261,8 +1329,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "backup",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == backup",
}
@ -1301,9 +1372,15 @@ func testcase_DatacenterFailover() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil),
"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, nil),
"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc4",
}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1350,17 +1427,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) {
"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) {
"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc4",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
},
}
return compileTestCase{entries: entries, expect: expect}
}
func testcase_Failover_Targets() compileTestCase {
entries := newEntries()
entries.AddProxyDefaults(&structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
},
})
entries.AddResolvers(
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Targets: []structs.ServiceResolverFailoverTarget{
{Datacenter: "dc3"},
{Service: "new-main"},
{Peer: "cluster-01"},
},
},
},
},
)
expect := &structs.CompiledDiscoveryChain{
Protocol: "tcp",
StartNode: "resolver:main.default.default.dc1",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:main.default.default.dc1": {
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "main.default.default.dc1",
Resolver: &structs.DiscoveryResolver{
ConnectTimeout: 5 * time.Second,
Target: "main.default.default.dc1",
Failover: &structs.DiscoveryFailover{
Targets: []string{
"main.default.default.dc3",
"new-main.default.default.dc1",
"main.default.default.external.cluster-01",
},
},
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc3",
}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
}),
"main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Peer: "cluster-01",
}, func(t *structs.DiscoveryTarget) {
t.SNI = ""
t.Name = ""
t.Datacenter = ""
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
@ -1422,7 +1587,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
@ -1452,7 +1620,7 @@ func testcase_DefaultResolver() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect}
@ -1488,7 +1656,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote,
}
@ -1530,7 +1698,7 @@ func testcase_ServiceMetaProjection() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -1588,7 +1756,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
},
}
@ -1623,7 +1791,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
},
}
@ -1658,7 +1826,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
@ -1692,7 +1863,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.SNI = "main.some.other.service.mesh"
t.External = true
}),
@ -1857,11 +2028,17 @@ func testcase_MultiDatacenterCanary() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc2": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc2", nil),
newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, nil),
33*time.Second,
),
"main.default.default.dc3": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc3", nil),
newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc3",
}, nil),
33*time.Second,
),
},
@ -2155,27 +2332,42 @@ func testcase_AllBellsAndWhistles() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "redirected",
ServiceSubset: "prod",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "ServiceMeta.env == prod",
}
}),
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v1",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 1",
}
}),
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2",
}
}),
"v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v3",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 3",
}
}),
"default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
"default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "default-subset",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{OnlyPassing: true}
}),
},
@ -2379,7 +2571,7 @@ func testcase_ResolverProtocolOverride() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect,
@ -2413,7 +2605,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect,
@ -2451,7 +2643,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase {
},
Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
return compileTestCase{entries: entries, expect: expect,
@ -2685,9 +2877,9 @@ func testcase_LBSplitterAndResolver() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
"baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil),
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
"baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil),
},
}
@ -2743,7 +2935,7 @@ func testcase_LBResolver() compileTestCase {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
},
}
@ -2791,8 +2983,17 @@ func newEntries() *configentry.DiscoveryChainSet {
}
}
func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, "trustdomain.consul")
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default

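testcase_Failover_Targets above expects the explicit failover Targets list to expand into one discovery target per entry, each inheriting whatever the override leaves unset. A pared-down sketch of that merge follows, using stand-in types rather than Consul's; it reproduces the three identifiers from the expected Targets map.

```go
package main

import "fmt"

// failoverOverride mirrors the shape of structs.ServiceResolverFailoverTarget
// in the test above: any subset of these fields may be set.
type failoverOverride struct {
	Service    string
	Datacenter string
	Peer       string
}

// target is a pared-down stand-in for a discovery target's identity.
type target struct {
	Service    string
	Namespace  string
	Partition  string
	Datacenter string
	Peer       string
}

// id mirrors the identifiers seen in the expected Targets maps above.
func (t target) id() string {
	if t.Peer != "" {
		return fmt.Sprintf("%s.%s.%s.external.%s", t.Service, t.Namespace, t.Partition, t.Peer)
	}
	return fmt.Sprintf("%s.%s.%s.%s", t.Service, t.Namespace, t.Partition, t.Datacenter)
}

// rewrite applies the merge rule used by rewriteTarget: non-empty override
// fields win, everything else is inherited from the current target.
func rewrite(base target, o failoverOverride) target {
	out := base
	if o.Service != "" {
		out.Service = o.Service
	}
	if o.Datacenter != "" {
		out.Datacenter = o.Datacenter
	}
	out.Peer = o.Peer
	if o.Peer != "" {
		out.Datacenter = ""
	}
	return out
}

func main() {
	base := target{Service: "main", Namespace: "default", Partition: "default", Datacenter: "dc1"}
	overrides := []failoverOverride{{Datacenter: "dc3"}, {Service: "new-main"}, {Peer: "cluster-01"}}
	for _, o := range overrides {
		fmt.Println(rewrite(base, o).id())
	}
	// Output:
	// main.default.default.dc3
	// new-main.default.default.dc1
	// main.default.default.external.cluster-01
}
```
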
View File

@ -720,9 +720,9 @@ func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} {
}
func (c *FSM) applyPeeringSecretsWrite(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringSecrets
var req pbpeering.SecretsWriteRequest
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering write request: %v", err))
panic(fmt.Errorf("failed to decode peering secrets write request: %v", err))
}
defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_secrets"}, time.Now(),

View File

@ -1,8 +1,12 @@
package fsm
import (
"fmt"
"net"
"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
"github.com/hashicorp/raft"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
@ -38,6 +42,7 @@ func init() {
registerRestorer(structs.FreeVirtualIPRequestType, restoreFreeVirtualIP)
registerRestorer(structs.PeeringWriteType, restorePeering)
registerRestorer(structs.PeeringTrustBundleWriteType, restorePeeringTrustBundle)
registerRestorer(structs.PeeringSecretsWriteType, restorePeeringSecrets)
}
func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error {
@ -95,6 +100,9 @@ func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) err
if err := s.persistPeeringTrustBundles(sink, encoder); err != nil {
return err
}
if err := s.persistPeeringSecrets(sink, encoder); err != nil {
return err
}
return nil
}
@ -582,6 +590,24 @@ func (s *snapshot) persistPeeringTrustBundles(sink raft.SnapshotSink, encoder *c
return nil
}
func (s *snapshot) persistPeeringSecrets(sink raft.SnapshotSink, encoder *codec.Encoder) error {
secrets, err := s.state.PeeringSecrets()
if err != nil {
return err
}
for entry := secrets.Next(); entry != nil; entry = secrets.Next() {
if _, err := sink.Write([]byte{byte(structs.PeeringSecretsWriteType)}); err != nil {
return err
}
if err := encoder.Encode(entry.(*pbpeering.PeeringSecrets)); err != nil {
return err
}
}
return nil
}
func restoreRegistration(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error {
var req structs.RegisterRequest
if err := decoder.Decode(&req); err != nil {
@ -864,11 +890,43 @@ func restoreSystemMetadata(header *SnapshotHeader, restore *state.Restore, decod
}
func restoreServiceVirtualIP(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error {
var req state.ServiceVirtualIP
// state.ServiceVirtualIP was changed in a breaking way in 1.13.0 (2e4cb6f77d2be36b02e9be0b289b24e5b0afb794).
// We attempt to reconcile the older type by decoding to a map then decoding that map into
// structs.PeeredServiceName first, and then structs.ServiceName.
var req struct {
Service map[string]interface{}
IP net.IP
structs.RaftIndex
}
if err := decoder.Decode(&req); err != nil {
return err
}
if err := restore.ServiceVirtualIP(req); err != nil {
vip := state.ServiceVirtualIP{
IP: req.IP,
RaftIndex: req.RaftIndex,
}
// PeeredServiceName is the expected primary key type.
var psn structs.PeeredServiceName
if err := mapstructure.Decode(req.Service, &psn); err != nil {
return fmt.Errorf("cannot decode to structs.PeeredServiceName: %w", err)
}
vip.Service = psn
// If the expected primary key field is empty, it must be the older ServiceName type.
if vip.Service.ServiceName.Name == "" {
var sn structs.ServiceName
if err := mapstructure.Decode(req.Service, &sn); err != nil {
return fmt.Errorf("cannot decode to structs.ServiceName: %w", err)
}
vip.Service = structs.PeeredServiceName{
ServiceName: sn,
}
}
if err := restore.ServiceVirtualIP(vip); err != nil {
return err
}
return nil
@ -906,3 +964,14 @@ func restorePeeringTrustBundle(header *SnapshotHeader, restore *state.Restore, d
}
return nil
}
func restorePeeringSecrets(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error {
var req pbpeering.PeeringSecrets
if err := decoder.Decode(&req); err != nil {
return err
}
if err := restore.PeeringSecrets(&req); err != nil {
return err
}
return nil
}
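
restoreServiceVirtualIP above decodes the raw record into a map and then lets mapstructure try the new primary-key shape before falling back to the pre-1.13.0 one. A stripped-down sketch of that fallback pattern, with illustrative struct names in place of Consul's:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// New and old shapes of the persisted record's Service field.
type PeeredServiceName struct {
	Peer        string
	ServiceName ServiceName
}

type ServiceName struct {
	Name string
}

// decodeService tries the new shape first; if its required field is empty it
// assumes the payload was written by an older version and re-decodes it into
// the legacy shape, wrapping that in the new one.
func decodeService(raw map[string]interface{}) (PeeredServiceName, error) {
	var psn PeeredServiceName
	if err := mapstructure.Decode(raw, &psn); err != nil {
		return PeeredServiceName{}, fmt.Errorf("cannot decode to PeeredServiceName: %w", err)
	}
	if psn.ServiceName.Name != "" {
		return psn, nil
	}
	var sn ServiceName
	if err := mapstructure.Decode(raw, &sn); err != nil {
		return PeeredServiceName{}, fmt.Errorf("cannot decode to ServiceName: %w", err)
	}
	return PeeredServiceName{ServiceName: sn}, nil
}

func main() {
	// A pre-1.13.0-style payload only carried the bare service name fields.
	oldPayload := map[string]interface{}{"Name": "foo"}
	psn, err := decodeService(oldPayload)
	fmt.Println(psn.ServiceName.Name, err)
}
```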

View File

@ -3,6 +3,7 @@ package fsm
import (
"bytes"
"fmt"
"net"
"testing"
"time"
@ -18,6 +19,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib/stringslice"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/prototest"
"github.com/hashicorp/consul/sdk/testutil"
)
@ -482,6 +484,14 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
ID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Name: "baz",
},
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: "baaeea83-8419-4aa8-ac89-14e7246a3d2f",
},
},
},
}))
// Peering Trust Bundles
@ -491,6 +501,27 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
RootPEMs: []string{"qux certificate bundle"},
}))
// Issue two more secrets writes so that the peering ends up with the following secrets:
// - Establishment: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84"
// - Pending: "0b7812d4-32d9-4e54-b1b3-4d97084982a0"
require.NoError(t, fsm.state.PeeringSecretsWrite(34, &pbpeering.SecretsWriteRequest{
PeerID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
EstablishmentSecret: "baaeea83-8419-4aa8-ac89-14e7246a3d2f",
PendingStreamSecret: "0b7812d4-32d9-4e54-b1b3-4d97084982a0",
},
},
}))
require.NoError(t, fsm.state.PeeringSecretsWrite(33, &pbpeering.SecretsWriteRequest{
PeerID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84",
},
},
}))
// Snapshot
snap, err := fsm.Snapshot()
require.NoError(t, err)
@ -797,6 +828,29 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
require.NotNil(t, prngRestored)
require.Equal(t, "baz", prngRestored.Name)
// Verify peering secrets are restored
secretsRestored, err := fsm2.state.PeeringSecretsRead(nil, "1fabcd52-1d46-49b0-b1d8-71559aee47f5")
require.NoError(t, err)
expectSecrets := &pbpeering.PeeringSecrets{
PeerID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84",
},
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: "0b7812d4-32d9-4e54-b1b3-4d97084982a0",
},
}
prototest.AssertDeepEqual(t, expectSecrets, secretsRestored)
uuids := []string{"389bbcdf-1c31-47d6-ae96-f2a3f4c45f84", "0b7812d4-32d9-4e54-b1b3-4d97084982a0"}
for _, id := range uuids {
free, err := fsm2.state.ValidateProposedPeeringSecretUUID(id)
require.NoError(t, err)
// The UUIDs in the peering secret should be tracked as in use.
require.False(t, free)
}
// Verify peering trust bundle is restored
idx, ptbRestored, err := fsm2.state.PeeringTrustBundleRead(nil, state.Query{
Value: "qux",
@ -909,3 +963,66 @@ func TestFSM_BadSnapshot_NilCAConfig(t *testing.T) {
require.EqualValues(t, 0, idx)
require.Nil(t, config)
}
// This test asserts that ServiceVirtualIP, which made a breaking change
// in 1.13.0, can still restore from older snapshots which use the old
// state.ServiceVirtualIP type.
func Test_restoreServiceVirtualIP(t *testing.T) {
psn := structs.PeeredServiceName{
ServiceName: structs.ServiceName{
Name: "foo",
},
}
run := func(t *testing.T, input interface{}) {
t.Helper()
var b []byte
buf := bytes.NewBuffer(b)
// Encode input
encoder := codec.NewEncoder(buf, structs.MsgpackHandle)
require.NoError(t, encoder.Encode(input))
// Create a decoder
dec := codec.NewDecoder(buf, structs.MsgpackHandle)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
require.NoError(t, err)
restore := fsm.State().Restore()
// Call restore
require.NoError(t, restoreServiceVirtualIP(nil, restore, dec))
require.NoError(t, restore.Commit())
ip, err := fsm.State().VirtualIPForService(psn)
require.NoError(t, err)
// 240->224 due to addIPOffset
require.Equal(t, "224.0.0.2", ip)
}
t.Run("new ServiceVirtualIP with PeeredServiceName", func(t *testing.T) {
run(t, state.ServiceVirtualIP{
Service: psn,
IP: net.ParseIP("240.0.0.2"),
RaftIndex: structs.RaftIndex{},
})
})
t.Run("pre-1.13.0 ServiceVirtualIP with ServiceName", func(t *testing.T) {
type compatServiceVirtualIP struct {
Service structs.ServiceName
IP net.IP
RaftIndex structs.RaftIndex
}
run(t, compatServiceVirtualIP{
Service: structs.ServiceName{
Name: "foo",
},
IP: net.ParseIP("240.0.0.2"),
RaftIndex: structs.RaftIndex{},
})
})
}

View File

@ -31,11 +31,18 @@ import (
)
var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"}
var leaderHealthyPeeringKey = []string{"consul", "peering", "healthy"}
var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
{
Name: leaderExportedServicesCountKey,
Help: "A gauge that tracks how many services are exported for the peering. " +
"The labels are \"peering\" and, for enterprise, \"partition\". " +
"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
"We emit this metric every 9 seconds",
},
{
Name: leaderHealthyPeeringKey,
Help: "A gauge that tracks how if a peering is healthy (1) or not (0). " +
"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
"We emit this metric every 9 seconds",
},
}
@ -85,13 +92,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
}
for _, peer := range peers {
status, found := s.peerStreamServer.StreamStatus(peer.ID)
if !found {
logger.Trace("did not find status for", "peer_name", peer.Name)
continue
}
esc := status.GetExportedServicesCount()
part := peer.Partition
labels := []metrics.Label{
{Name: "peer_name", Value: peer.Name},
@ -101,7 +101,25 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
labels = append(labels, metrics.Label{Name: "partition", Value: part})
}
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
status, found := s.peerStreamServer.StreamStatus(peer.ID)
if found {
// exported services count metric
esc := status.GetExportedServicesCount()
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
}
// peering health metric
if status.NeverConnected {
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
} else {
healthy := s.peerStreamServer.Tracker.IsHealthy(status)
healthyInt := 0
if healthy {
healthyInt = 1
}
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels)
}
}
return nil
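
The new healthy-peering gauge above reports 1 or 0 per peering and NaN when the stream has never connected, so dashboards can tell "no data yet" apart from "unhealthy". A small go-metrics sketch of emitting such a gauge; the label values and sink setup here are made up for the example.

```go
package main

import (
	"math"
	"time"

	"github.com/armon/go-metrics"
)

func main() {
	// Send gauges to an in-memory sink so the example is self-contained.
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("consul"), sink); err != nil {
		panic(err)
	}

	labels := []metrics.Label{
		{Name: "peer_name", Value: "my-peer-dialer"},
		{Name: "peer_id", Value: "1fabcd52-1d46-49b0-b1d8-71559aee47f5"},
	}

	healthy := true
	neverConnected := false

	key := []string{"consul", "peering", "healthy"}
	switch {
	case neverConnected:
		// NaN marks "no data yet" rather than reporting a misleading 0.
		metrics.SetGaugeWithLabels(key, float32(math.NaN()), labels)
	case healthy:
		metrics.SetGaugeWithLabels(key, 1, labels)
	default:
		metrics.SetGaugeWithLabels(key, 0, labels)
	}
}
```
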
@ -237,7 +255,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
}
}
logger.Trace("checking connected streams", "streams", s.peerStreamServer.ConnectedStreams(), "sequence_id", seq)
logger.Trace("checking connected streams", "streams", connectedStreams, "sequence_id", seq)
// Clean up active streams of peerings that were deleted from the state store.
// TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK?
@ -277,13 +295,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
}
// Create a ring buffer to cycle through peer addresses in the retry loop below.
buffer := ring.New(len(peer.PeerServerAddresses))
for _, addr := range peer.PeerServerAddresses {
buffer.Value = addr
buffer = buffer.Next()
}
secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
if err != nil {
return fmt.Errorf("failed to read secret for peering: %w", err)
@ -294,27 +305,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
logger.Trace("establishing stream to peer")
retryCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
streamStatus, err := s.peerStreamTracker.Register(peer.ID)
streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}
streamCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
// Start a goroutine to watch for updates to peer server addresses.
// The latest valid server address can be received from nextServerAddr.
nextServerAddr := make(chan string)
go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr)
// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
go retryLoopBackoffPeering(retryCtx, logger, func() error {
go retryLoopBackoffPeering(streamCtx, logger, func() error {
// Try a new address on each iteration by advancing the ring buffer on errors.
defer func() {
buffer = buffer.Next()
}()
addr, ok := buffer.Value.(string)
if !ok {
return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
}
addr := <-nextServerAddr
logger.Trace("dialing peer", "addr", addr)
conn, err := grpc.DialContext(retryCtx, addr,
conn, err := grpc.DialContext(streamCtx, addr,
// TODO(peering): use a grpc.WithStatsHandler here?)
tlsOption,
// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
@ -331,7 +341,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
defer conn.Close()
client := pbpeerstream.NewPeerStreamServiceClient(conn)
stream, err := client.StreamResources(retryCtx)
stream, err := client.StreamResources(streamCtx)
if err != nil {
return err
}
@ -379,6 +389,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
return nil
}
// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr.
// It loads the server addresses into a ring buffer and cycles through them until:
// 1. streamCtx is cancelled (peer is deleted)
// 2. the peer is modified and the watchset fires.
//
// In case (2) we refetch the peering and rebuild the ring buffer.
func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) {
defer close(nextServerAddr)
// we initialize the ring buffer with the peer passed to `establishStream`
// because the caller has pre-checked `peer.ShouldDial`, guaranteeing
// at least one server address.
//
// IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block.
ringbuf := ring.New(len(peer.PeerServerAddresses))
for _, addr := range peer.PeerServerAddresses {
ringbuf.Value = addr
ringbuf = ringbuf.Next()
}
innerWs := memdb.NewWatchSet()
_, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
if err != nil {
s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.",
"peer_id", peer.ID,
"error", err)
}
fetchAddrs := func() error {
// reinstantiate innerWs to prevent it from growing indefinitely
innerWs = memdb.NewWatchSet()
_, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
if err != nil {
return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err)
}
if !peering.IsActive() {
return fmt.Errorf("peer %q is no longer active", peer.ID)
}
if len(peering.PeerServerAddresses) == 0 {
return fmt.Errorf("peer %q has no addresses to dial", peer.ID)
}
ringbuf = ring.New(len(peering.PeerServerAddresses))
for _, addr := range peering.PeerServerAddresses {
ringbuf.Value = addr
ringbuf = ringbuf.Next()
}
return nil
}
for {
select {
case nextServerAddr <- ringbuf.Value.(string):
ringbuf = ringbuf.Next()
case err := <-innerWs.WatchCh(ctx):
if err != nil {
// context was cancelled
return
}
// watch fired so we refetch the peering and rebuild the ring buffer
if err := fetchAddrs(); err != nil {
s.logger.Warn("watchset for peer was fired but failed to update server addresses",
"peer_id", peer.ID,
"error", err)
}
}
}
}
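
watchPeerServerAddrs keeps the candidate addresses in a container/ring and hands out the next one on every receive from nextServerAddr. A tiny standalone sketch of that round-robin cycling follows; the addresses are placeholders, and the real routine also selects on a context and rebuilds the ring when the peering changes.

```go
package main

import (
	"container/ring"
	"fmt"
)

func main() {
	addrs := []string{"10.0.0.1:8300", "10.0.0.2:8300", "10.0.0.3:8300"}

	// Load the addresses into a ring; it must have length > 0 or receives on
	// the consuming channel would block forever, as the comment above warns.
	r := ring.New(len(addrs))
	for _, a := range addrs {
		r.Value = a
		r = r.Next()
	}

	next := make(chan string)
	go func() {
		defer close(next)
		// Hand out addresses round-robin to whoever is dialing.
		for i := 0; i < 5; i++ {
			next <- r.Value.(string)
			r = r.Next()
		}
	}()

	for addr := range next {
		fmt.Println("dialing", addr)
	}
}
```
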
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}
@ -391,6 +469,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error {
// process. This includes deletion of the peerings themselves in addition to any peering data
raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
for {
select {
case <-ctx.Done():
return nil
default:
}
ws := memdb.NewWatchSet()
state := s.fsm.State()
_, peerings, err := s.fsm.State().PeeringListDeleted(ws)
@ -606,6 +690,15 @@ func isFailedPreconditionErr(err error) bool {
if err == nil {
return false
}
// Handle wrapped errors, since status.FromError does a naive assertion.
var statusErr interface {
GRPCStatus() *grpcstatus.Status
}
if errors.As(err, &statusErr) {
return statusErr.GRPCStatus().Code() == codes.FailedPrecondition
}
grpcErr, ok := grpcstatus.FromError(err)
if !ok {
return false

View File
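The isFailedPreconditionErr change above stops relying on a direct type assertion on the outermost error and instead walks the wrapped error chain for anything that can report a gRPC status. A self-contained sketch of that unwrapping check:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"
)

// isFailedPrecondition mirrors the check added above: unwrap the error chain
// looking for anything that can report a gRPC status, instead of asserting on
// the outermost error directly.
func isFailedPrecondition(err error) bool {
	if err == nil {
		return false
	}
	var statusErr interface {
		GRPCStatus() *grpcstatus.Status
	}
	if errors.As(err, &statusErr) {
		return statusErr.GRPCStatus().Code() == codes.FailedPrecondition
	}
	return false
}

func main() {
	base := grpcstatus.Error(codes.FailedPrecondition, "peering is not active")
	wrapped := fmt.Errorf("failed to establish stream: %w", base)
	// Prints true even though the status error is buried under a wrapper.
	fmt.Println(isFailedPrecondition(wrapped))
}
```
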

@ -7,15 +7,18 @@ import (
"errors"
"fmt"
"io/ioutil"
"math"
"testing"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
@ -23,6 +26,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
@ -41,8 +45,8 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
t.Skip("too slow for testing.Short")
}
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "bob"
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
if enableTLS {
@ -51,25 +55,25 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
}
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
// Create a peering by generating a token
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
s1Client := pbpeering.NewPeeringServiceClient(conn)
acceptorClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-s2",
PeerName: "my-peer-dialer",
}
resp, err := s1Client.GenerateToken(ctx, &req)
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
@ -78,14 +82,14 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
var token structs.PeeringToken
require.NoError(t, json.Unmarshal(tokenJSON, &token))
// S1 should not have a stream tracked for dc2 because s1 generated a token for baz, and therefore needs to wait to be dialed.
// The acceptor should not have a stream tracked for dc2 because it generated a token for the dialer, and therefore needs to wait to be dialed.
time.Sleep(1 * time.Second)
_, found := s1.peerStreamServer.StreamStatus(token.PeerID)
_, found := acceptor.peerStreamServer.StreamStatus(token.PeerID)
require.False(t, found)
// Bring up s2 and establish a peering with s1's token so that it attempts to dial.
_, s2 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "betty"
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
_, dialer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
if enableTLS {
@ -94,33 +98,39 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
}
})
testrpc.WaitForLeader(t, s2.RPC, "dc2")
testrpc.WaitForLeader(t, dialer.RPC, "dc2")
// Create a peering at s2 by establishing a peering with s1's token
// Create a peering at dialer by establishing a peering with acceptor's token
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err = grpc.DialContext(ctx, s2.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())),
conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
s2Client := pbpeering.NewPeeringServiceClient(conn)
dialerClient := pbpeering.NewPeeringServiceClient(conn)
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-s1",
PeerName: "my-peer-acceptor",
PeeringToken: resp.PeeringToken,
}
_, err = s2Client.Establish(ctx, &establishReq)
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := s2Client.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s1"})
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
status, found := s2.peerStreamServer.StreamStatus(p.Peering.ID)
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
require.True(r, status.Connected)
})
retry.Run(t, func(r *retry.R) {
status, found := acceptor.peerStreamServer.StreamStatus(p.Peering.PeerID)
require.True(r, found)
require.True(r, status.Connected)
})
@ -128,21 +138,21 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
// Delete the peering to trigger the termination sequence.
deleted := &pbpeering.Peering{
ID: p.Peering.ID,
Name: "my-peer-s1",
Name: "my-peer-acceptor",
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s2.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
s2.logger.Trace("deleted peering for my-peer-s1")
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
dialer.logger.Trace("deleted peering for my-peer-acceptor")
retry.Run(t, func(r *retry.R) {
_, found := s2.peerStreamServer.StreamStatus(p.Peering.ID)
_, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.False(r, found)
})
// s1 should have also marked the peering as terminated.
// acceptor should have also marked the peering as terminated.
retry.Run(t, func(r *retry.R) {
_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
Value: "my-peer-s2",
_, peering, err := acceptor.fsm.State().PeeringRead(nil, state.Query{
Value: "my-peer-dialer",
})
require.NoError(r, err)
require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
@ -151,20 +161,20 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
t.Run("without-tls", func(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ServerDeletion(t, false)
testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t, false)
})
t.Run("with-tls", func(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ServerDeletion(t, true)
testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t, true)
})
}
func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS bool) {
func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "bob"
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
if enableTLS {
@ -173,14 +183,14 @@ func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS boo
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
}
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
// Define a peering by generating a token for s2
// Define a peering by generating a token for dialer
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
@ -189,7 +199,7 @@ func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS boo
peeringClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-s2",
PeerName: "my-peer-dialer",
}
resp, err := peeringClient.GenerateToken(ctx, &req)
require.NoError(t, err)
@ -200,9 +210,9 @@ func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS boo
var token structs.PeeringToken
require.NoError(t, json.Unmarshal(tokenJSON, &token))
// Bring up s2 and establish a peering with s1's token so that it attempts to dial.
_, s2 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "betty"
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
_, dialer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
if enableTLS {
@ -211,33 +221,39 @@ func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS boo
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
}
})
testrpc.WaitForLeader(t, s2.RPC, "dc2")
testrpc.WaitForLeader(t, dialer.RPC, "dc2")
// Create a peering at s2 by establishing a peering with s1's token
// Create a peering at dialer by establishing a peering with acceptor's token
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err = grpc.DialContext(ctx, s2.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())),
conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
s2Client := pbpeering.NewPeeringServiceClient(conn)
dialerClient := pbpeering.NewPeeringServiceClient(conn)
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-s1",
PeerName: "my-peer-acceptor",
PeeringToken: resp.PeeringToken,
}
_, err = s2Client.Establish(ctx, &establishReq)
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := s2Client.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s1"})
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
status, found := s2.peerStreamServer.StreamStatus(p.Peering.ID)
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
require.True(r, status.Connected)
})
retry.Run(t, func(r *retry.R) {
status, found := acceptor.peerStreamServer.StreamStatus(p.Peering.PeerID)
require.True(r, found)
require.True(r, status.Connected)
})
@ -245,21 +261,22 @@ func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS boo
// Delete the peering from the server peer to trigger the termination sequence.
deleted := &pbpeering.Peering{
ID: p.Peering.PeerID,
Name: "my-peer-s2",
Name: "my-peer-dialer",
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s1.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
s2.logger.Trace("deleted peering for my-peer-s2")
require.NoError(t, acceptor.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
acceptor.logger.Trace("deleted peering for my-peer-dialer")
retry.Run(t, func(r *retry.R) {
_, found := s1.peerStreamServer.StreamStatus(p.Peering.PeerID)
_, found := acceptor.peerStreamServer.StreamStatus(p.Peering.PeerID)
require.False(r, found)
})
// s2 should have received the termination message and updated the peering state.
// dialer should have received the termination message and updated the peering state.
retry.Run(t, func(r *retry.R) {
_, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{
Value: "my-peer-s1",
_, peering, err := dialer.fsm.State().PeeringRead(nil, state.Query{
Value: "my-peer-acceptor",
})
require.NoError(r, err)
require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
@ -452,8 +469,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
//
// To test this, we start the two peer servers (accepting and dialing), set up peering, and then shut down
// the accepting peer. This terminates the connection without sending a Terminated message.
// We then restart the accepting peer (we actually spin up a new server with the same config and port) and then
// assert that the dialing peer reestablishes the connection.
// We then restart the accepting peer and assert that the dialing peer reestablishes the connection.
func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -566,20 +582,17 @@ func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
// Have to manually shut down the gRPC server otherwise it stays bound to the port.
acceptingServer.externalGRPCServer.Stop()
// Mimic the server restarting by starting a new server with the same config.
// Restart the server by re-using the previous acceptor's data directory and node id.
_, acceptingServerRestart := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptingServer.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCPort = acceptingServerPort
c.DataDir = acceptingServer.config.DataDir
c.NodeID = acceptingServer.config.NodeID
})
testrpc.WaitForLeader(t, acceptingServerRestart.RPC, "dc1")
// Re-insert the peering state, mimicking a snapshot restore.
require.NoError(t, acceptingServerRestart.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{
Peering: peering.Peering,
Secret: secrets,
}))
testrpc.WaitForLeader(t, acceptingServerRestart.RPC, "dc1")
// The dialing peer should eventually reconnect.
retry.Run(t, func(r *retry.R) {
@ -964,6 +977,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
var (
s2PeerID1 = generateUUID()
s2PeerID2 = generateUUID()
s2PeerID3 = generateUUID()
testContextTimeout = 60 * time.Second
lastIdx = uint64(0)
)
@ -1053,6 +1067,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
// mimic tracking exported services
mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})
// pretend that the heartbeat happened
mst2.TrackRecvHeartbeat()
}
// Simulate a peering that never connects
{
p3 := &pbpeering.Peering{
ID: s2PeerID3,
Name: "my-peer-s4",
PeerID: token.PeerID, // doesn't much matter what these values are
PeerCAPems: token.CA,
PeerServerName: token.ServerName,
PeerServerAddresses: token.ServerAddresses,
}
require.True(t, p3.ShouldDial())
lastIdx++
require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3}))
}
// set up a metrics sink
@ -1082,6 +1114,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2))
require.Equal(r, float32(2), metric2.Value) // for d, e services
keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2)
healthyMetric2, ok := intv.Gauges[keyHealthyMetric2]
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2))
require.Equal(r, float32(1), healthyMetric2.Value)
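// The peering that never connected has no stream status, so its healthy gauge
// is reported as NaN.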
keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3)
healthyMetric3, ok := intv.Gauges[keyHealthyMetric3]
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3))
require.True(r, math.IsNaN(float64(healthyMetric3.Value)))
})
}
@ -1172,7 +1216,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
}))
require.Never(t, func() bool {
_, found := s1.peerStreamTracker.StreamStatus(peerID)
_, found := s1.peerStreamServer.StreamStatus(peerID)
return found
}, 7*time.Second, 1*time.Second, "peering should not have been established")
}
@ -1323,3 +1367,148 @@ func TestLeader_Peering_retryLoopBackoffPeering_cancelContext(t *testing.T) {
fmt.Errorf("error 1"),
}, allErrors)
}
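// Test_isFailedPreconditionErr checks that a gRPC FailedPrecondition status is
// recognized both directly and when wrapped in another error.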
func Test_isFailedPreconditionErr(t *testing.T) {
st := grpcstatus.New(codes.FailedPrecondition, "cannot establish a peering stream on a follower node")
err := st.Err()
assert.True(t, isFailedPreconditionErr(err))
// test that wrapped errors are checked correctly
werr := fmt.Errorf("wrapped: %w", err)
assert.True(t, isFailedPreconditionErr(werr))
}
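// Test_Leader_PeeringSync_ServerAddressUpdates verifies that an Establish call made
// while a stream is already connected does not overwrite the stored server addresses,
// and that address updates written to the state store are picked up by the leader's
// dialing routine.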
func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
// We want 1s retries for this test
orig := maxRetryBackoff
maxRetryBackoff = 1
t.Cleanup(func() { maxRetryBackoff = orig })
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
// Create a peering by generating a token
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
acceptorClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-dialer",
}
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
_, dialer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
})
testrpc.WaitForLeader(t, dialer.RPC, "dc2")
// Create a peering at dialer by establishing a peering with acceptor's token
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
grpc.WithInsecure(),
grpc.WithBlock())
require.NoError(t, err)
defer conn.Close()
dialerClient := pbpeering.NewPeeringServiceClient(conn)
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-acceptor",
PeeringToken: resp.PeeringToken,
}
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
require.True(r, status.Connected)
})
testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// generate a new token from the acceptor
req := pbpeering.GenerateTokenRequest{
PeerName: "my-peer-dialer",
}
resp, err := acceptorClient.GenerateToken(ctx, &req)
require.NoError(t, err)
token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
require.NoError(t, err)
// we will update the token with bad addresses to assert it doesn't clobber existing ones
token.ServerAddresses = []string{"1.2.3.4:1234"}
badToken, err := acceptor.peeringBackend.EncodeToken(token)
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// Try establishing.
// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
establishReq := pbpeering.EstablishRequest{
PeerName: "my-peer-acceptor",
PeeringToken: string(badToken),
}
_, err = dialerClient.Establish(ctx, &establishReq)
require.NoError(t, err)
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
require.NoError(t, err)
require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
})
testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
// force close the acceptor's gRPC server so the dialer retries with a new address.
acceptor.externalGRPCServer.Stop()
clone := proto.Clone(p.Peering)
updated := clone.(*pbpeering.Peering)
// start with a bad address so we can assert for a specific error
updated.PeerServerAddresses = append([]string{
"bad",
}, p.Peering.PeerServerAddresses...)
// this write will wake up the watch on the leader to refetch server addresses
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))
retry.Run(t, func(r *retry.R) {
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
require.True(r, found)
// We assert that this error is set, which indicates that we iterated
// through the bad address.
require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address")
require.False(r, status.Connected)
})
})
}

View File

@ -159,6 +159,13 @@ func computeResolvedServiceConfig(
thisReply.Destination = *serviceConf.Destination
}
if serviceConf.MaxInboundConnections > 0 {
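// The opaque proxy config map may not exist yet, so initialize it before
// storing the connection limit.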
if thisReply.ProxyConfig == nil {
thisReply.ProxyConfig = map[string]interface{}{}
}
thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
}
thisReply.Meta = serviceConf.Meta
}

View File

@ -3,12 +3,60 @@ package consul
import (
"testing"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/structs"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
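// Test_ComputeResolvedServiceConfig checks that fields from the service-defaults
// entry, such as MaxInboundConnections, are surfaced in the resolved proxy config.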
func Test_ComputeResolvedServiceConfig(t *testing.T) {
type args struct {
scReq *structs.ServiceConfigRequest
upstreamIDs []structs.ServiceID
entries *configentry.ResolvedServiceConfigSet
}
sid := structs.ServiceID{
ID: "sid",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
tests := []struct {
name string
args args
want *structs.ServiceConfigResponse
}{
{
name: "proxy with maxinboundsconnections",
args: args{
scReq: &structs.ServiceConfigRequest{
Name: "sid",
},
entries: &configentry.ResolvedServiceConfigSet{
ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
sid: {
MaxInboundConnections: 20,
},
},
},
},
want: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"max_inbound_connections": 20,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := computeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
false, tt.args.entries, nil)
require.NoError(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func Test_MergeServiceConfig_TransparentProxy(t *testing.T) {
type args struct {
defaults *structs.ServiceConfigResponse
@ -153,6 +201,12 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) {
DestinationNamespace: "default",
DestinationPartition: "default",
DestinationName: "zap",
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(20),
"MaxFailures": int64(4),
},
},
},
},
},
@ -171,8 +225,8 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) {
DestinationName: "zap",
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"Interval": int64(20),
"MaxFailures": int64(4),
},
"protocol": "grpc",
},

View File

@ -141,7 +141,7 @@ func (b *PeeringBackend) ValidateProposedPeeringSecret(id string) (bool, error)
return b.srv.fsm.State().ValidateProposedPeeringSecretUUID(id)
}
func (b *PeeringBackend) PeeringSecretsWrite(req *pbpeering.PeeringSecrets) error {
func (b *PeeringBackend) PeeringSecretsWrite(req *pbpeering.SecretsWriteRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringSecretsWriteType, req)
return err
}

View File

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/testrpc"
)
@ -76,3 +77,62 @@ func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn,
return conn, nil
}
}
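// TestPeerStreamService_ForwardToLeader verifies that an ExchangeSecret request
// sent to a follower is forwarded to the leader, where the establishment secret
// was stored by GenerateToken.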
func TestPeerStreamService_ForwardToLeader(t *testing.T) {
t.Parallel()
_, conf1 := testServerConfig(t)
server1, err := newServer(t, conf1)
require.NoError(t, err)
_, conf2 := testServerConfig(t)
conf2.Bootstrap = false
server2, err := newServer(t, conf2)
require.NoError(t, err)
// server1 is leader, server2 follower
testrpc.WaitForLeader(t, server1.RPC, "dc1")
joinLAN(t, server2, server1)
testrpc.WaitForLeader(t, server2.RPC, "dc1")
peerId := testUUID()
// Simulate a GenerateToken call on server1, which stores the establishment secret
{
require.NoError(t, server1.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
Name: "foo",
ID: peerId,
},
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: peerId,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84",
},
},
},
}))
}
testutil.RunStep(t, "server2 forwards write to server1", func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
// We will dial server2 which should forward to server1
conn, err := gogrpc.DialContext(ctx, server2.config.RPCAddr.String(),
gogrpc.WithContextDialer(newServerDialer(server2.config.RPCAddr.String())),
gogrpc.WithInsecure(),
gogrpc.WithBlock())
require.NoError(t, err)
t.Cleanup(func() { conn.Close() })
peerStreamClient := pbpeerstream.NewPeerStreamServiceClient(conn)
req := &pbpeerstream.ExchangeSecretRequest{
PeerID: peerId,
EstablishmentSecret: "389bbcdf-1c31-47d6-ae96-f2a3f4c45f84",
}
_, err = peerStreamClient.ExchangeSecret(ctx, req)
require.NoError(t, err)
})
}

View File

@ -370,9 +370,9 @@ type Server struct {
// peerStreamServer is a server used to handle peering streams from external clusters.
peerStreamServer *peerstream.Server
// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
peeringServer *peering.Server
peerStreamTracker *peerstream.Tracker
peeringServer *peering.Server
// embedded struct to hold all the enterprise specific data
EnterpriseServer
@ -724,11 +724,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
Logger: logger.Named("grpc-api.server-discovery"),
}).Register(s.externalGRPCServer)
s.peerStreamTracker = peerstream.NewTracker()
s.peeringBackend = NewPeeringBackend(s)
s.peerStreamServer = peerstream.NewServer(peerstream.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
GetStore: func() peerstream.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.peerstream"),
ACLResolver: s.ACLResolver,
@ -790,7 +788,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
p := peering.NewServer(peering.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
Tracker: s.peerStreamServer.Tracker,
Logger: deps.Logger.Named("grpc-api.peering"),
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
// Only forward the request if the dc in the request matches the server's datacenter.
@ -816,6 +814,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
// Note: these external gRPC services are also exposed on the internal server to
// enable RPC forwarding.
s.peerStreamServer.Register(srv)
s.externalACLServer.Register(srv)
s.externalConnectCAServer.Register(srv)
}
@ -1573,12 +1572,12 @@ func (s *Server) Stats() map[string]map[string]string {
// GetLANCoordinate returns the coordinate of the node in the LAN gossip
// pool.
//
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
// - Clients return a single coordinate for the single gossip pool they are
// in (default, segment, or partition).
//
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
// - Servers return one coordinate for their canonical gossip pool (i.e.
// default partition/segment) and one per segment they are also ancillary
// members of.
//
// NOTE: servers do not emit coordinates for partitioned gossip pools they
// are ancillary members of.

View File

@ -159,3 +159,18 @@ func (s *Server) addEnterpriseStats(stats map[string]map[string]string) {
func getSerfMemberEnterpriseMeta(member serf.Member) *acl.EnterpriseMeta {
return structs.NodeEnterpriseMetaInDefaultPartition()
}
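// addSerfMetricsLabels attaches metric labels to the serf config. Here only the
// network label (lan or wan) is set; the segment, partition, and area arguments
// are not used in this build.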
func addSerfMetricsLabels(conf *serf.Config, wan bool, segment string, partition string, areaID string) {
conf.MetricLabels = []metrics.Label{}
networkMetric := metrics.Label{
Name: "network",
}
if wan {
networkMetric.Value = "wan"
} else {
networkMetric.Value = "lan"
}
conf.MetricLabels = append(conf.MetricLabels, networkMetric)
}

View File

@ -8,6 +8,7 @@ import (
"strings"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/memberlist"
"github.com/hashicorp/raft"
@ -177,9 +178,10 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) {
if opts.WAN {
nt, err := memberlist.NewNetTransport(&memberlist.NetTransportConfig{
BindAddrs: []string{conf.MemberlistConfig.BindAddr},
BindPort: conf.MemberlistConfig.BindPort,
Logger: conf.MemberlistConfig.Logger,
BindAddrs: []string{conf.MemberlistConfig.BindAddr},
BindPort: conf.MemberlistConfig.BindPort,
Logger: conf.MemberlistConfig.Logger,
MetricLabels: []metrics.Label{{Name: "network", Value: "wan"}},
})
if err != nil {
return nil, err
@ -230,6 +232,8 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) {
conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(s.logger)
addSerfMetricsLabels(conf, opts.WAN, opts.Segment, s.config.AgentEnterpriseMeta().PartitionOrDefault(), "")
addEnterpriseSerfTags(conf.Tags, s.config.AgentEnterpriseMeta())
if s.config.OverrideInitialSerfTags != nil {

View File

@ -179,6 +179,7 @@ func testServerConfig(t *testing.T) (string, *Config) {
"IntermediateCertTTL": "288h",
},
}
config.PeeringEnabled = true
return dir, config
}

View File

@ -11,6 +11,7 @@ import (
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
@ -871,7 +872,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
if svc.Kind == structs.ServiceKindTypical && svc.Service != "consul" {
// Check if this service is covered by a gateway's wildcard specifier; we force the service kind to a gateway-service here as that takes precedence
sn := structs.NewServiceName(svc.Service, &svc.EnterpriseMeta)
if err = checkGatewayWildcardsAndUpdate(tx, idx, &sn, structs.GatewayServiceKindService); err != nil {
if err = checkGatewayWildcardsAndUpdate(tx, idx, &sn, svc, structs.GatewayServiceKindService); err != nil {
return fmt.Errorf("failed updating gateway mapping: %s", err)
}
if err = checkGatewayAndUpdate(tx, idx, &sn, structs.GatewayServiceKindService); err != nil {
@ -890,6 +891,15 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
return fmt.Errorf("failed updating upstream/downstream association")
}
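// Connect proxies are keyed by the service they front, so gateway wildcard
// mappings use the destination service name rather than the sidecar's own name.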
service := svc.Service
if svc.Kind == structs.ServiceKindConnectProxy {
service = svc.Proxy.DestinationServiceName
}
sn := structs.ServiceName{Name: service, EnterpriseMeta: svc.EnterpriseMeta}
if err = checkGatewayWildcardsAndUpdate(tx, idx, &sn, svc, structs.GatewayServiceKindService); err != nil {
return fmt.Errorf("failed updating gateway mapping: %s", err)
}
supported, err := virtualIPsSupported(tx, nil)
if err != nil {
return err
@ -897,12 +907,6 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
// Update the virtual IP for the service
if supported {
service := svc.Service
if svc.Kind == structs.ServiceKindConnectProxy {
service = svc.Proxy.DestinationServiceName
}
sn := structs.ServiceName{Name: service, EnterpriseMeta: svc.EnterpriseMeta}
psn := structs.PeeredServiceName{Peer: svc.PeerName, ServiceName: sn}
vip, err := assignServiceVirtualIP(tx, idx, psn)
if err != nil {
@ -1130,7 +1134,7 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool,
}
// Services returns all services along with a list of associated tags.
func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
tx := s.db.Txn(false)
defer tx.Abort()
@ -1144,30 +1148,11 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerNam
}
ws.Add(services.WatchCh())
// Rip through the services and enumerate them and their unique set of
// tags.
unique := make(map[string]map[string]struct{})
var result []*structs.ServiceNode
for service := services.Next(); service != nil; service = services.Next() {
svc := service.(*structs.ServiceNode)
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
result = append(result, service.(*structs.ServiceNode))
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return idx, results, nil
return idx, result, nil
}
func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) {
@ -1208,7 +1193,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta,
}
// ServicesByNodeMeta returns all services, filtered by the given node metadata.
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
tx := s.db.Txn(false)
defer tx.Abort()
@ -1255,8 +1240,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
}
allServicesCh := allServices.WatchCh()
// Populate the services map
unique := make(map[string]map[string]struct{})
var result structs.ServiceNodes
for node := nodes.Next(); node != nil; node = nodes.Next() {
n := node.(*structs.Node)
if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) {
@ -1270,30 +1254,11 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
}
ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh)
// Rip through the services and enumerate them and their unique set of
// tags.
for service := services.Next(); service != nil; service = services.Next() {
svc := service.(*structs.ServiceNode)
tags, ok := unique[svc.ServiceName]
if !ok {
unique[svc.ServiceName] = make(map[string]struct{})
tags = unique[svc.ServiceName]
}
for _, tag := range svc.ServiceTags {
tags[tag] = struct{}{}
}
result = append(result, service.(*structs.ServiceNode))
}
}
// Generate the output structure.
var results = make(structs.Services)
for service, tags := range unique {
results[service] = make([]string, 0, len(tags))
for tag := range tags {
results[service] = append(results[service], tag)
}
}
return idx, results, nil
return idx, result, nil
}
// maxIndexForService return the maximum Raft Index for a service
@ -1713,6 +1678,9 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent
if err != nil {
return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err)
}
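// Copy the node's ID onto the returned ServiceNode so that lookups by node ID
// and by node name return the same result.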
if service != nil {
service.ID = node.ID
}
return idx, service, nil
}
@ -1984,11 +1952,6 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
if err := catalogUpdateServiceExtinctionIndex(tx, idx, entMeta, svc.PeerName); err != nil {
return err
}
if svc.PeerName == "" {
if err := cleanupGatewayWildcards(tx, idx, svc); err != nil {
return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err)
}
}
psn := structs.PeeredServiceName{Peer: svc.PeerName, ServiceName: name}
if err := freeServiceVirtualIP(tx, idx, psn, nil); err != nil {
return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err)
@ -2001,6 +1964,13 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
return fmt.Errorf("Could not find any service %s: %s", svc.ServiceName, err)
}
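// Clean up gateway wildcard mappings for local (non-peered) services. The final
// argument is false because this deletion is for a service instance, not a
// destination config entry.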
if svc.PeerName == "" {
sn := structs.ServiceName{Name: svc.ServiceName, EnterpriseMeta: svc.EnterpriseMeta}
if err := cleanupGatewayWildcards(tx, idx, sn, false); err != nil {
return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err)
}
}
return nil
}
@ -3652,6 +3622,18 @@ func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewaySer
continue
}
hasConnectInstance, hasNonConnectInstance, err := serviceHasConnectInstances(tx, sn.ServiceName, entMeta)
if err != nil {
return err
}
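// Ingress gateways should only route to services with connect instances, and
// terminating gateways only to services with non-connect instances, so skip
// wildcard candidates that don't qualify.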
if service.GatewayKind == structs.ServiceKindIngressGateway && !hasConnectInstance {
continue
}
if service.GatewayKind == structs.ServiceKindTerminatingGateway && !hasNonConnectInstance {
continue
}
existing, err := tx.First(tableGatewayServices, indexID, service.Gateway, sn.CompoundServiceName(), service.Port)
if err != nil {
return fmt.Errorf("gateway service lookup failed: %s", err)
@ -3717,6 +3699,38 @@ func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewaySer
return nil
}
// serviceHasConnectInstances returns two booleans: whether the service has at least
// one connect instance, and whether it has at least one non-connect instance.
func serviceHasConnectInstances(tx WriteTxn, serviceName string, entMeta *acl.EnterpriseMeta) (bool, bool, error) {
hasConnectInstance := false
query := Query{
Value: serviceName,
EnterpriseMeta: *entMeta,
}
svc, err := tx.First(tableServices, indexConnect, query)
if err != nil {
return false, false, fmt.Errorf("failed service lookup: %s", err)
}
if svc != nil {
hasConnectInstance = true
}
hasNonConnectInstance := false
iter, err := tx.Get(tableServices, indexService, query)
if err != nil {
return false, false, fmt.Errorf("failed service lookup: %s", err)
}
for service := iter.Next(); service != nil; service = iter.Next() {
sn := service.(*structs.ServiceNode)
if !sn.ServiceConnect.Native {
hasNonConnectInstance = true
break
}
}
return hasConnectInstance, hasNonConnectInstance, nil
}
// updateGatewayService associates services with gateways after an eligible event
// ie. Registering a service in a namespace targeted by a gateway
func updateGatewayService(tx WriteTxn, idx uint64, mapping *structs.GatewayService) error {
@ -3754,14 +3768,35 @@ func updateGatewayService(tx WriteTxn, idx uint64, mapping *structs.GatewayServi
// checkGatewayWildcardsAndUpdate checks whether a service matches a
// wildcard definition in gateway config entries and if so adds it to the
// gateway-services table.
func checkGatewayWildcardsAndUpdate(tx WriteTxn, idx uint64, svc *structs.ServiceName, kind structs.GatewayServiceKind) error {
func checkGatewayWildcardsAndUpdate(tx WriteTxn, idx uint64, svc *structs.ServiceName, ns *structs.NodeService, kind structs.GatewayServiceKind) error {
sn := structs.ServiceName{Name: structs.WildcardSpecifier, EnterpriseMeta: svc.EnterpriseMeta}
svcGateways, err := tx.Get(tableGatewayServices, indexService, sn)
if err != nil {
return fmt.Errorf("failed gateway lookup for %q: %s", svc.Name, err)
}
hasConnectInstance, hasNonConnectInstance, err := serviceHasConnectInstances(tx, svc.Name, &svc.EnterpriseMeta)
if err != nil {
return err
}
// If we were passed a NodeService, this might be the first registered instance of the service,
// so we need to count it as either a connect or non-connect instance.
if ns != nil {
if ns.Connect.Native || ns.Kind == structs.ServiceKindConnectProxy {
hasConnectInstance = true
} else {
hasNonConnectInstance = true
}
}
for service := svcGateways.Next(); service != nil; service = svcGateways.Next() {
if wildcardSvc, ok := service.(*structs.GatewayService); ok && wildcardSvc != nil {
if wildcardSvc.GatewayKind == structs.ServiceKindIngressGateway && !hasConnectInstance {
continue
}
if wildcardSvc.GatewayKind == structs.ServiceKindTerminatingGateway && !hasNonConnectInstance && kind != structs.GatewayServiceKindDestination {
continue
}
// Copy the wildcard mapping and modify it
gatewaySvc := wildcardSvc.Clone()
@ -3803,12 +3838,11 @@ func checkGatewayAndUpdate(tx WriteTxn, idx uint64, svc *structs.ServiceName, ki
return nil
}
func cleanupGatewayWildcards(tx WriteTxn, idx uint64, svc *structs.ServiceNode) error {
func cleanupGatewayWildcards(tx WriteTxn, idx uint64, sn structs.ServiceName, cleaningUpDestination bool) error {
// Clean up association between service name and gateways if needed
sn := structs.ServiceName{Name: svc.ServiceName, EnterpriseMeta: svc.EnterpriseMeta}
gateways, err := tx.Get(tableGatewayServices, indexService, sn)
if err != nil {
return fmt.Errorf("failed gateway lookup for %q: %s", svc.ServiceName, err)
return fmt.Errorf("failed gateway lookup for %q: %s", sn.Name, err)
}
mappings := make([]*structs.GatewayService, 0)
@ -3818,12 +3852,44 @@ func cleanupGatewayWildcards(tx WriteTxn, idx uint64, svc *structs.ServiceNode)
}
}
// Check whether there are any connect or non-connect instances remaining for this service.
// If there are no connect instances left, ingress gateways with a wildcard entry can remove
// their association with it (same with terminating gateways if there are no non-connect
// instances left).
hasConnectInstance, hasNonConnectInstance, err := serviceHasConnectInstances(tx, sn.Name, &sn.EnterpriseMeta)
if err != nil {
return err
}
// If we're deleting a service instance but this service is defined as a destination via config entry,
// keep the mapping around.
hasDestination := false
if !cleaningUpDestination {
q := configentry.NewKindName(structs.ServiceDefaults, sn.Name, &sn.EnterpriseMeta)
existing, err := tx.First(tableConfigEntries, indexID, q)
if err != nil {
return fmt.Errorf("failed config entry lookup: %s", err)
}
if existing != nil {
if entry, ok := existing.(*structs.ServiceConfigEntry); ok && entry.Destination != nil {
hasDestination = true
}
}
}
// Do the updates in a separate loop so we don't trash the iterator.
for _, m := range mappings {
// Only delete if association was created by a wildcard specifier.
// Otherwise the service was specified in the config entry, and the association should be maintained
// for when the service is re-registered
if m.FromWildcard {
if m.GatewayKind == structs.ServiceKindIngressGateway && hasConnectInstance {
continue
}
if m.GatewayKind == structs.ServiceKindTerminatingGateway && (hasNonConnectInstance || hasDestination) {
continue
}
if err := tx.Delete(tableGatewayServices, m); err != nil {
return fmt.Errorf("failed to truncate gateway services table: %v", err)
}
@ -3836,7 +3902,7 @@ func cleanupGatewayWildcards(tx WriteTxn, idx uint64, svc *structs.ServiceNode)
} else {
kind, err := GatewayServiceKind(tx, m.Service.Name, &m.Service.EnterpriseMeta)
if err != nil {
return fmt.Errorf("failed to get gateway service kind for service %s: %v", svc.ServiceName, err)
return fmt.Errorf("failed to get gateway service kind for service %s: %v", sn.Name, err)
}
checkGatewayAndUpdate(tx, idx, &structs.ServiceName{Name: m.Service.Name, EnterpriseMeta: m.Service.EnterpriseMeta}, kind)
}

View File

@ -4,13 +4,16 @@ import (
"context"
crand "crypto/rand"
"fmt"
"github.com/hashicorp/consul/acl"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/acl"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/assert"
@ -269,17 +272,20 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
require.Equal(t, uint64(2), idx)
require.Equal(t, svcmap["redis1"], r)
exp := svcmap["redis1"].ToServiceNode("node1")
exp.ID = nodeID
// lookup service by node name
idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn)
require.Equal(t, exp, sn)
// lookup service by node ID
idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn)
require.Equal(t, exp, sn)
// lookup service by invalid node
_, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName)
@ -2101,10 +2107,13 @@ func TestStateStore_Services(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns1.EnterpriseMeta.Normalize()
if err := s.EnsureService(2, "node1", ns1); err != nil {
t.Fatalf("err: %s", err)
}
testRegisterService(t, s, 3, "node1", "dogs")
ns1Dogs := testRegisterService(t, s, 3, "node1", "dogs")
ns1Dogs.EnterpriseMeta.Normalize()
testRegisterNode(t, s, 4, "node2")
ns2 := &structs.NodeService{
ID: "service3",
@ -2113,6 +2122,7 @@ func TestStateStore_Services(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns2.EnterpriseMeta.Normalize()
if err := s.EnsureService(5, "node2", ns2); err != nil {
t.Fatalf("err: %s", err)
}
@ -2130,19 +2140,13 @@ func TestStateStore_Services(t *testing.T) {
t.Fatalf("bad index: %d", idx)
}
// Verify the result. We sort the lists since the order is
// non-deterministic (it's built using a map internally).
expected := structs.Services{
"redis": []string{"prod", "primary", "replica"},
"dogs": []string{},
}
sort.Strings(expected["redis"])
for _, tags := range services {
sort.Strings(tags)
}
if !reflect.DeepEqual(expected, services) {
t.Fatalf("bad: %#v", services)
// Verify the result.
expected := []*structs.ServiceNode{
ns1Dogs.ToServiceNode("node1"),
ns1.ToServiceNode("node1"),
ns2.ToServiceNode("node2"),
}
assertDeepEqual(t, expected, services, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
// Deleting a node with a service should fire the watch.
if err := s.DeleteNode(6, "node1", nil, ""); err != nil {
@ -2181,6 +2185,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns1.EnterpriseMeta.Normalize()
if err := s.EnsureService(2, "node0", ns1); err != nil {
t.Fatalf("err: %s", err)
}
@ -2191,6 +2196,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
Address: "1.1.1.1",
Port: 1111,
}
ns2.EnterpriseMeta.Normalize()
if err := s.EnsureService(3, "node1", ns2); err != nil {
t.Fatalf("err: %s", err)
}
@ -2205,11 +2211,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{
"redis": []string{"primary", "prod"},
expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
}
sort.Strings(res["redis"])
require.Equal(t, expected, res)
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Get all services using the common meta value", func(t *testing.T) {
@ -2217,11 +2222,12 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{
"redis": []string{"primary", "prod", "replica"},
require.Len(t, res, 2)
expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
ns2.ToServiceNode("node1"),
}
sort.Strings(res["redis"])
require.Equal(t, expected, res)
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Get an empty list for an invalid meta value", func(t *testing.T) {
@ -2229,8 +2235,8 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{}
require.Equal(t, expected, res)
var expected []*structs.ServiceNode
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) {
@ -2238,11 +2244,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
expected := structs.Services{
"redis": []string{"primary", "prod"},
expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
}
sort.Strings(res["redis"])
require.Equal(t, expected, res)
assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
})
t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) {
@ -5337,13 +5342,70 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) {
}
assert.Equal(t, expect, out)
// Add a destination via config entry and make sure it's picked up by the wildcard.
configEntryDest := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "destination1",
Destination: &structs.DestinationConfig{Port: 9000, Addresses: []string{"kafka.test.com"}},
}
assert.NoError(t, s.EnsureConfigEntry(27, configEntryDest))
idx, out, err = s.GatewayServices(ws, "gateway2", nil)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(27))
assert.Len(t, out, 3)
expectWildcardIncludesDest := structs.GatewayServices{
{
Service: structs.NewServiceName("api", nil),
Gateway: structs.NewServiceName("gateway2", nil),
GatewayKind: structs.ServiceKindTerminatingGateway,
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 26,
ModifyIndex: 26,
},
},
{
Service: structs.NewServiceName("db", nil),
Gateway: structs.NewServiceName("gateway2", nil),
GatewayKind: structs.ServiceKindTerminatingGateway,
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 26,
ModifyIndex: 26,
},
},
{
Service: structs.NewServiceName("destination1", nil),
Gateway: structs.NewServiceName("gateway2", nil),
GatewayKind: structs.ServiceKindTerminatingGateway,
ServiceKind: structs.GatewayServiceKindDestination,
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 27,
ModifyIndex: 27,
},
},
}
assert.ElementsMatch(t, expectWildcardIncludesDest, out)
// Delete the destination.
assert.NoError(t, s.DeleteConfigEntry(28, structs.ServiceDefaults, "destination1", nil))
idx, out, err = s.GatewayServices(ws, "gateway2", nil)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(28))
assert.Len(t, out, 2)
assert.Equal(t, expect, out)
// Deleting the config entry should remove existing mappings
assert.Nil(t, s.DeleteConfigEntry(27, "terminating-gateway", "gateway", nil))
assert.Nil(t, s.DeleteConfigEntry(29, "terminating-gateway", "gateway", nil))
assert.True(t, watchFired(ws))
idx, out, err = s.GatewayServices(ws, "gateway", nil)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(27))
assert.Equal(t, idx, uint64(29))
assert.Len(t, out, 0)
}
@ -5753,6 +5815,10 @@ func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) {
assert.Nil(t, s.EnsureService(13, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(14, "foo", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
// Connect services (should be ignored by terminating gateway)
assert.Nil(t, s.EnsureService(15, "foo", &structs.NodeService{ID: "web", Service: "web", Tags: nil, Address: "", Connect: structs.ServiceConnect{Native: true}, Port: 5000}))
assert.Nil(t, s.EnsureService(16, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Connect: structs.ServiceConnect{Native: true}, Port: 5000}))
// Register two gateways
assert.Nil(t, s.EnsureService(17, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
assert.Nil(t, s.EnsureService(18, "baz", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "other-gateway", Service: "other-gateway", Port: 443}))
@ -5895,6 +5961,16 @@ func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) {
},
}
assert.Equal(t, expect, out)
// Delete the non-connect instance of api
assert.Nil(t, s.DeleteService(21, "foo", "api", nil, ""))
// Gateway with wildcard entry should have no services left, because the last
// non-connect instance of 'api' was deleted.
idx, out, err = s.GatewayServices(ws, "other-gateway", nil)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(21))
assert.Empty(t, out)
}
func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
@ -5904,7 +5980,7 @@ func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
t.Run("check service1 ingress gateway", func(t *testing.T) {
idx, results, err := s.CheckIngressServiceNodes(ws, "service1", nil)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Equal(t, uint64(18), idx)
// Multiple instances of the ingress2 service
require.Len(t, results, 4)
@ -5923,7 +5999,7 @@ func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
t.Run("check service2 ingress gateway", func(t *testing.T) {
idx, results, err := s.CheckIngressServiceNodes(ws, "service2", nil)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Equal(t, uint64(18), idx)
require.Len(t, results, 2)
ids := make(map[string]struct{})
@ -5941,7 +6017,7 @@ func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
ws := memdb.NewWatchSet()
idx, results, err := s.CheckIngressServiceNodes(ws, "service3", nil)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Equal(t, uint64(18), idx)
require.Len(t, results, 1)
require.Equal(t, "wildcardIngress", results[0].Service.ID)
})
@ -5952,17 +6028,17 @@ func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
idx, results, err := s.CheckIngressServiceNodes(ws, "service1", nil)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Equal(t, uint64(18), idx)
require.Len(t, results, 3)
idx, results, err = s.CheckIngressServiceNodes(ws, "service2", nil)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Equal(t, uint64(18), idx)
require.Len(t, results, 1)
idx, results, err = s.CheckIngressServiceNodes(ws, "service3", nil)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Equal(t, uint64(18), idx)
// TODO(ingress): index goes backward when deleting last config entry
// require.Equal(t,uint64(11), idx)
require.Len(t, results, 0)
@ -6305,23 +6381,80 @@ func TestStateStore_GatewayServices_WildcardAssociation(t *testing.T) {
})
t.Run("do not associate connect-proxy services with gateway", func(t *testing.T) {
// Should only associate web (the destination service of the proxy), not the
// sidecar service name itself.
testRegisterSidecarProxy(t, s, 19, "node1", "web")
require.False(t, watchFired(ws))
expected := structs.GatewayServices{
{
Gateway: structs.NewServiceName("wildcardIngress", nil),
Service: structs.NewServiceName("service1", nil),
GatewayKind: structs.ServiceKindIngressGateway,
Port: 4444,
Protocol: "http",
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 12,
ModifyIndex: 12,
},
},
{
Gateway: structs.NewServiceName("wildcardIngress", nil),
Service: structs.NewServiceName("service2", nil),
GatewayKind: structs.ServiceKindIngressGateway,
Port: 4444,
Protocol: "http",
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 12,
ModifyIndex: 12,
},
},
{
Gateway: structs.NewServiceName("wildcardIngress", nil),
Service: structs.NewServiceName("service3", nil),
GatewayKind: structs.ServiceKindIngressGateway,
Port: 4444,
Protocol: "http",
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 12,
ModifyIndex: 12,
},
},
{
Gateway: structs.NewServiceName("wildcardIngress", nil),
Service: structs.NewServiceName("web", nil),
ServiceKind: structs.GatewayServiceKindService,
GatewayKind: structs.ServiceKindIngressGateway,
Port: 4444,
Protocol: "http",
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 19,
ModifyIndex: 19,
},
},
}
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
require.Equal(t, uint64(19), idx)
require.ElementsMatch(t, results, expected)
})
t.Run("do not associate consul services with gateway", func(t *testing.T) {
ws := memdb.NewWatchSet()
_, _, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(t, err)
require.Nil(t, s.EnsureService(20, "node1",
&structs.NodeService{ID: "consul", Service: "consul", Tags: nil},
))
require.False(t, watchFired(ws))
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
require.Equal(t, uint64(19), idx)
require.Len(t, results, 4)
})
}
@ -6346,8 +6479,8 @@ func TestStateStore_GatewayServices_IngressProtocolFiltering(t *testing.T) {
}
testRegisterNode(t, s, 0, "node1")
testRegisterService(t, s, 1, "node1", "service1")
testRegisterService(t, s, 2, "node1", "service2")
testRegisterConnectService(t, s, 1, "node1", "service1")
testRegisterConnectService(t, s, 2, "node1", "service2")
assert.NoError(t, s.EnsureConfigEntry(4, ingress1))
})
@ -6510,15 +6643,25 @@ func setupIngressState(t *testing.T, s *Store) memdb.WatchSet {
testRegisterNode(t, s, 0, "node1")
testRegisterNode(t, s, 1, "node2")
// Register a service against the nodes.
// Register some connect services against the nodes.
testRegisterIngressService(t, s, 3, "node1", "wildcardIngress")
testRegisterIngressService(t, s, 4, "node1", "ingress1")
testRegisterIngressService(t, s, 5, "node1", "ingress2")
testRegisterIngressService(t, s, 6, "node2", "ingress2")
testRegisterIngressService(t, s, 7, "node1", "nothingIngress")
testRegisterService(t, s, 8, "node1", "service1")
testRegisterService(t, s, 9, "node2", "service2")
testRegisterConnectService(t, s, 8, "node1", "service1")
testRegisterConnectService(t, s, 9, "node2", "service2")
testRegisterService(t, s, 10, "node2", "service3")
testRegisterServiceWithChangeOpts(t, s, 11, "node2", "service3-proxy", false, func(service *structs.NodeService) {
service.Kind = structs.ServiceKindConnectProxy
service.Proxy = structs.ConnectProxyConfig{
DestinationServiceName: "service3",
}
})
// Register some non-connect services - these shouldn't be picked up by a wildcard.
testRegisterService(t, s, 17, "node1", "service4")
testRegisterService(t, s, 18, "node2", "service5")
// Default protocol to http
proxyDefaults := &structs.ProxyConfigEntry{
@ -7883,6 +8026,7 @@ func TestCatalog_upstreamsFromRegistration_Ingress(t *testing.T) {
Address: "127.0.0.3",
Port: 443,
EnterpriseMeta: *defaultMeta,
Connect: structs.ServiceConnect{Native: true},
}
require.NoError(t, s.EnsureService(5, "foo", &svc))
assert.True(t, watchFired(ws))
@ -8667,3 +8811,10 @@ func setVirtualIPFlags(t *testing.T, s *Store) {
Value: "true",
}))
}
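// assertDeepEqual fails the test immediately with a readable diff when the two
// values are not equal according to cmp.Diff.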
func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
t.Helper()
if diff := cmp.Diff(x, y, opts...); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}

View File

@ -371,9 +371,12 @@ func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *a
gsKind = structs.GatewayServiceKindUnknown
}
serviceName := structs.NewServiceName(c.GetName(), c.GetEnterpriseMeta())
if err := checkGatewayWildcardsAndUpdate(tx, idx, &serviceName, gsKind); err != nil {
if err := checkGatewayWildcardsAndUpdate(tx, idx, &serviceName, nil, gsKind); err != nil {
return fmt.Errorf("failed updating gateway mapping: %s", err)
}
if err := cleanupGatewayWildcards(tx, idx, serviceName, true); err != nil {
return fmt.Errorf("failed to cleanup gateway mapping: \"%s\"; err: %v", serviceName, err)
}
if err := checkGatewayAndUpdate(tx, idx, &serviceName, gsKind); err != nil {
return fmt.Errorf("failed updating gateway mapping: %s", err)
}
@ -434,7 +437,7 @@ func insertConfigEntryWithTxn(tx WriteTxn, idx uint64, conf structs.ConfigEntry)
if err != nil {
return fmt.Errorf("failed updating gateway mapping: %s", err)
}
if err := checkGatewayWildcardsAndUpdate(tx, idx, &sn, gsKind); err != nil {
if err := checkGatewayWildcardsAndUpdate(tx, idx, &sn, nil, gsKind); err != nil {
return fmt.Errorf("failed updating gateway mapping: %s", err)
}
if err := checkGatewayAndUpdate(tx, idx, &sn, gsKind); err != nil {

View File

@ -372,7 +372,7 @@ func TestStore_ServiceDefaults_Kind_Destination(t *testing.T) {
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, gatewayServices[0].ServiceKind, structs.GatewayServiceKindUnknown)
require.Equal(t, structs.GatewayServiceKindUnknown, gatewayServices[0].ServiceKind)
_, kindServices, err = s.ServiceNamesOfKind(ws, structs.ServiceKindDestination)
require.NoError(t, err)
@ -710,13 +710,141 @@ func TestStore_ServiceDefaults_Kind_Destination_Wildcard(t *testing.T) {
require.NoError(t, s.DeleteConfigEntry(6, structs.ServiceDefaults, destination.Name, &destination.EnterpriseMeta))
//Watch is fired because we transitioned to a destination, by default we assume it's not.
// Watch is fired because we deleted the destination - now the mapping should be gone.
require.True(t, watchFired(ws))
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, gatewayServices[0].ServiceKind, structs.GatewayServiceKindUnknown)
require.Len(t, gatewayServices, 0)
t.Run("delete service instance before config entry", func(t *testing.T) {
// Set up a service with both a real instance and destination from a config entry.
require.NoError(t, s.EnsureNode(7, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
require.NoError(t, s.EnsureService(8, "foo", &structs.NodeService{ID: "dest2", Service: "dest2", Tags: nil, Address: "", Port: 5000}))
ws = memdb.NewWatchSet()
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, structs.GatewayServiceKindService, gatewayServices[0].ServiceKind)
// Register destination; shouldn't change the gateway mapping.
destination2 := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "dest2",
Destination: &structs.DestinationConfig{},
}
require.NoError(t, s.EnsureConfigEntry(9, destination2))
require.False(t, watchFired(ws))
ws = memdb.NewWatchSet()
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
expected := structs.GatewayServices{
{
Service: structs.NewServiceName("dest2", nil),
Gateway: structs.NewServiceName("Gtwy1", nil),
ServiceKind: structs.GatewayServiceKindService,
GatewayKind: structs.ServiceKindTerminatingGateway,
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 8,
ModifyIndex: 8,
},
},
}
require.Equal(t, expected, gatewayServices)
// Delete the service, mapping should still exist.
require.NoError(t, s.DeleteService(10, "foo", "dest2", nil, ""))
require.False(t, watchFired(ws))
ws = memdb.NewWatchSet()
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, expected, gatewayServices)
// Delete the config entry, mapping should be gone.
require.NoError(t, s.DeleteConfigEntry(11, structs.ServiceDefaults, "dest2", &destination.EnterpriseMeta))
require.True(t, watchFired(ws))
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Empty(t, gatewayServices)
})
t.Run("delete config entry before service instance", func(t *testing.T) {
// Set up a service with both a real instance and a destination from a config entry.
destination2 := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "dest2",
Destination: &structs.DestinationConfig{},
}
require.NoError(t, s.EnsureConfigEntry(7, destination2))
ws = memdb.NewWatchSet()
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
expected := structs.GatewayServices{
{
Service: structs.NewServiceName("dest2", nil),
Gateway: structs.NewServiceName("Gtwy1", nil),
ServiceKind: structs.GatewayServiceKindDestination,
GatewayKind: structs.ServiceKindTerminatingGateway,
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 7,
ModifyIndex: 7,
},
},
}
require.Equal(t, expected, gatewayServices)
// Register service, only ServiceKind should have changed on the gateway mapping.
require.NoError(t, s.EnsureNode(8, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
require.NoError(t, s.EnsureService(9, "foo", &structs.NodeService{ID: "dest2", Service: "dest2", Tags: nil, Address: "", Port: 5000}))
require.True(t, watchFired(ws))
ws = memdb.NewWatchSet()
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
expected = structs.GatewayServices{
{
Service: structs.NewServiceName("dest2", nil),
Gateway: structs.NewServiceName("Gtwy1", nil),
ServiceKind: structs.GatewayServiceKindService,
GatewayKind: structs.ServiceKindTerminatingGateway,
FromWildcard: true,
RaftIndex: structs.RaftIndex{
CreateIndex: 7,
ModifyIndex: 9,
},
},
}
require.Equal(t, expected, gatewayServices)
// Delete the config entry, mapping should still exist.
require.NoError(t, s.DeleteConfigEntry(10, structs.ServiceDefaults, "dest2", &destination.EnterpriseMeta))
require.False(t, watchFired(ws))
ws = memdb.NewWatchSet()
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, expected, gatewayServices)
// Delete the service, mapping should be gone.
require.NoError(t, s.DeleteService(11, "foo", "dest2", nil, ""))
require.True(t, watchFired(ws))
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Empty(t, gatewayServices)
})
}
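The two subtests added above pin down the ordering semantics when the same name is covered by both a catalog instance and a destination config entry behind a wildcard terminating gateway: the mapping survives until the last of the two is removed, and its ServiceKind reflects whichever is present. For reference, a minimal sketch of that setup, assuming the in-repo `state` and `structs` packages and purely illustrative raft indexes:

```go
package example

import (
	"github.com/hashicorp/consul/agent/consul/state"
	"github.com/hashicorp/consul/agent/structs"
)

// seedWildcardDestination writes a wildcard terminating gateway and a
// destination-only service-defaults entry, then registers a real instance so
// the gateway mapping's ServiceKind flips from Destination to Service.
func seedWildcardDestination(s *state.Store) error {
	gw := &structs.TerminatingGatewayConfigEntry{
		Kind:     structs.TerminatingGateway,
		Name:     "Gtwy1",
		Services: []structs.LinkedService{{Name: "*"}},
	}
	if err := s.EnsureConfigEntry(1, gw); err != nil {
		return err
	}

	dest := &structs.ServiceConfigEntry{
		Kind:        structs.ServiceDefaults,
		Name:        "dest2",
		Destination: &structs.DestinationConfig{},
	}
	// The wildcard picks this up as GatewayServiceKindDestination.
	if err := s.EnsureConfigEntry(2, dest); err != nil {
		return err
	}

	// Registering a concrete instance upgrades the mapping to
	// GatewayServiceKindService, as the subtests above assert.
	if err := s.EnsureNode(3, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
		return err
	}
	return s.EnsureService(4, "foo", &structs.NodeService{ID: "dest2", Service: "dest2", Port: 5000})
}
```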
func TestStore_Service_TerminatingGateway_Kind_Service_Wildcard(t *testing.T) {
@ -774,74 +902,6 @@ func TestStore_Service_TerminatingGateway_Kind_Service_Wildcard(t *testing.T) {
require.Len(t, gatewayServices, 0)
}
func TestStore_Service_TerminatingGateway_Kind_Service_Destination_Wildcard(t *testing.T) {
s := testConfigStateStore(t)
Gtwy := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "Gtwy1",
Services: []structs.LinkedService{
{
Name: "*",
},
},
}
// Create
require.NoError(t, s.EnsureConfigEntry(0, Gtwy))
service := &structs.NodeService{
Kind: structs.ServiceKindTypical,
Service: "web",
}
destination := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "web",
Destination: &structs.DestinationConfig{},
}
_, gatewayServices, err := s.GatewayServices(nil, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 0)
ws := memdb.NewWatchSet()
_, _, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
// Create
require.NoError(t, s.EnsureConfigEntry(0, destination))
_, gatewayServices, err = s.GatewayServices(nil, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, gatewayServices[0].ServiceKind, structs.GatewayServiceKindDestination)
require.NoError(t, s.EnsureNode(0, &structs.Node{Node: "node1"}))
require.NoError(t, s.EnsureService(0, "node1", service))
//Watch is fired because we transitioned to a destination, by default we assume it's not.
require.True(t, watchFired(ws))
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 1)
require.Equal(t, gatewayServices[0].ServiceKind, structs.GatewayServiceKindService)
ws = memdb.NewWatchSet()
_, _, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.NoError(t, s.DeleteService(6, "node1", service.ID, &service.EnterpriseMeta, ""))
//Watch is fired because we transitioned to a destination, by default we assume it's not.
require.True(t, watchFired(ws))
_, gatewayServices, err = s.GatewayServices(ws, "Gtwy1", nil)
require.NoError(t, err)
require.Len(t, gatewayServices, 0)
}
func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
ensureConfigEntry := func(s *Store, idx uint64, entry structs.ConfigEntry) error {
if err := entry.Normalize(); err != nil {


@ -175,36 +175,47 @@ func peeringSecretsReadByPeerIDTxn(tx ReadTxn, ws memdb.WatchSet, id string) (*p
return secret, nil
}
func (s *Store) PeeringSecretsWrite(idx uint64, secret *pbpeering.PeeringSecrets) error {
func (s *Store) PeeringSecretsWrite(idx uint64, req *pbpeering.SecretsWriteRequest) error {
tx := s.db.WriteTxn(idx)
defer tx.Abort()
if err := s.peeringSecretsWriteTxn(tx, secret); err != nil {
if err := s.peeringSecretsWriteTxn(tx, req); err != nil {
return fmt.Errorf("failed to write peering secret: %w", err)
}
return tx.Commit()
}
func (s *Store) peeringSecretsWriteTxn(tx WriteTxn, secret *pbpeering.PeeringSecrets) error {
if secret == nil {
func (s *Store) peeringSecretsWriteTxn(tx WriteTxn, req *pbpeering.SecretsWriteRequest) error {
if req == nil || req.Request == nil {
return nil
}
if err := secret.Validate(); err != nil {
return err
if err := req.Validate(); err != nil {
return fmt.Errorf("invalid secret write request: %w", err)
}
peering, err := peeringReadByIDTxn(tx, nil, secret.PeerID)
peering, err := peeringReadByIDTxn(tx, nil, req.PeerID)
if err != nil {
return fmt.Errorf("failed to read peering by id: %w", err)
}
if peering == nil {
return fmt.Errorf("unknown peering %q for secret", secret.PeerID)
return fmt.Errorf("unknown peering %q for secret", req.PeerID)
}
// If the peering came from a peering token no validation is done for the given secrets.
// Dialing peers do not need to validate uniqueness because the secrets were generated elsewhere.
if peering.ShouldDial() {
if err := tx.Insert(tablePeeringSecrets, secret); err != nil {
r, ok := req.Request.(*pbpeering.SecretsWriteRequest_Establish)
if !ok {
return fmt.Errorf("invalid request type %T when persisting stream secret for dialing peer", req.Request)
}
secrets := pbpeering.PeeringSecrets{
PeerID: req.PeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: r.Establish.ActiveStreamSecret,
},
}
if err := tx.Insert(tablePeeringSecrets, &secrets); err != nil {
return fmt.Errorf("failed inserting peering: %w", err)
}
return nil
@ -213,21 +224,16 @@ func (s *Store) peeringSecretsWriteTxn(tx WriteTxn, secret *pbpeering.PeeringSec
// If the peering token was generated locally, validate that the newly introduced UUID is still unique.
// RPC handlers validate that generated IDs are available, but availability cannot be guaranteed until the state store operation.
var newSecretID string
switch {
// Establishment secrets are written when generating peering tokens, and no other secret IDs are included.
case secret.GetEstablishment() != nil:
newSecretID = secret.GetEstablishment().SecretID
// Stream secrets can be written as:
// - A new PendingSecretID from the ExchangeSecret RPC
// - An ActiveSecretID when promoting a pending secret on first use
case secret.GetStream() != nil:
if pending := secret.GetStream().GetPendingSecretID(); pending != "" {
newSecretID = pending
}
switch r := req.Request.(type) {
// We do not need to check the long-lived Stream.ActiveSecretID for uniqueness because:
// - In the cluster that generated it the secret is always introduced as a PendingSecretID, then promoted to ActiveSecretID.
// This means that the promoted secret is already known to be unique.
// Establishment secrets are written when generating peering tokens, and no other secret IDs are included.
case *pbpeering.SecretsWriteRequest_GenerateToken:
newSecretID = r.GenerateToken.EstablishmentSecret
// When exchanging an establishment secret, a new pending stream secret is generated.
// The active stream secret doesn't need to be checked for uniqueness because it is only ever promoted from pending.
case *pbpeering.SecretsWriteRequest_ExchangeSecret:
newSecretID = r.ExchangeSecret.PendingStreamSecret
}
if newSecretID != "" {
@ -244,53 +250,106 @@ func (s *Store) peeringSecretsWriteTxn(tx WriteTxn, secret *pbpeering.PeeringSec
}
}
existing, err := peeringSecretsReadByPeerIDTxn(tx, nil, secret.PeerID)
existing, err := peeringSecretsReadByPeerIDTxn(tx, nil, req.PeerID)
if err != nil {
return err
}
secrets := pbpeering.PeeringSecrets{
PeerID: req.PeerID,
}
var toDelete []string
if existing != nil {
// Collect any overwritten UUIDs for deletion.
switch r := req.Request.(type) {
case *pbpeering.SecretsWriteRequest_GenerateToken:
// Store the newly-generated establishment secret, overwriting any that existed.
secrets.Establishment = &pbpeering.PeeringSecrets_Establishment{
SecretID: r.GenerateToken.GetEstablishmentSecret(),
}
// Merge in existing stream secrets when persisting a new establishment secret.
// This is to avoid invalidating stream secrets when a new peering token
// is generated.
//
// We purposely DO NOT do the reverse of inheriting an existing establishment secret.
// When exchanging establishment secrets for stream secrets, we invalidate the
// establishment secret by deleting it.
if secret.GetEstablishment() != nil && secret.GetStream() == nil && existing.GetStream() != nil {
secret.Stream = existing.Stream
}
secrets.Stream = existing.GetStream()
// Collect any overwritten UUIDs for deletion.
//
// Old establishment secret ID are always cleaned up when they don't match.
// They will either be replaced by a new one or deleted in the secret exchange RPC.
existingEstablishment := existing.GetEstablishment().GetSecretID()
if existingEstablishment != "" && existingEstablishment != secret.GetEstablishment().GetSecretID() {
// When a new token is generated, we replace any unused establishment secrets.
if existingEstablishment := existing.GetEstablishment().GetSecretID(); existingEstablishment != "" {
toDelete = append(toDelete, existingEstablishment)
}
// Old active secret IDs are always cleaned up when they don't match.
// They are only ever replaced when promoting a pending secret ID.
existingActive := existing.GetStream().GetActiveSecretID()
if existingActive != "" && existingActive != secret.GetStream().GetActiveSecretID() {
case *pbpeering.SecretsWriteRequest_ExchangeSecret:
if existing == nil {
return fmt.Errorf("cannot exchange peering secret: no known secrets for peering")
}
// Store the newly-generated pending stream secret, overwriting any that existed.
secrets.Stream = &pbpeering.PeeringSecrets_Stream{
PendingSecretID: r.ExchangeSecret.GetPendingStreamSecret(),
// Avoid invalidating existing active secrets when exchanging establishment secret for pending.
ActiveSecretID: existing.GetStream().GetActiveSecretID(),
}
// When exchanging an establishment secret we invalidate the existing establishment secret.
existingEstablishment := existing.GetEstablishment().GetSecretID()
switch {
case existingEstablishment == "":
// When there is no existing establishment secret we must not proceed because another ExchangeSecret
// RPC already invalidated it. Otherwise, this operation would overwrite the pending secret
// from the previous ExchangeSecret.
return fmt.Errorf("invalid establishment secret: peering was already established")
case existingEstablishment != r.ExchangeSecret.GetEstablishmentSecret():
// If there is an existing establishment secret but it is not the one from the request then
// we must not proceed because a newer one was generated.
return fmt.Errorf("invalid establishment secret")
default:
toDelete = append(toDelete, existingEstablishment)
}
// When exchanging an establishment secret, unused pending secrets are overwritten.
if existingPending := existing.GetStream().GetPendingSecretID(); existingPending != "" {
toDelete = append(toDelete, existingPending)
}
case *pbpeering.SecretsWriteRequest_PromotePending:
if existing == nil {
return fmt.Errorf("cannot promote pending secret: no known secrets for peering")
}
if existing.GetStream().GetPendingSecretID() != r.PromotePending.GetActiveStreamSecret() {
// There is a potential race if multiple dialing clusters send an Open request with a valid
// pending secret. The secret could be validated for all concurrently at the RPC layer,
// but then the pending secret is promoted or otherwise changes for one dialer before the others.
return fmt.Errorf("invalid pending stream secret")
}
// Store the promoted stream secret, replacing any previously active secret.
secrets.Stream = &pbpeering.PeeringSecrets_Stream{
// Promoting a pending secret moves it to active.
PendingSecretID: "",
// Store the newly-promoted pending secret as the active secret.
ActiveSecretID: r.PromotePending.GetActiveStreamSecret(),
}
// Avoid invalidating existing establishment secrets when promoting pending secrets.
secrets.Establishment = existing.GetEstablishment()
// If there was previously an active stream secret it gets replaced in favor of the pending secret
// that is being promoted.
if existingActive := existing.GetStream().GetActiveSecretID(); existingActive != "" {
toDelete = append(toDelete, existingActive)
}
// Pending secrets can change in three ways:
// - Generating a new pending secret: Nothing to delete here since there's no old pending secret being replaced.
// - Re-establishing a peering, and re-generating a pending secret: should delete the old one if both are non-empty.
// - Promoting a pending secret: Nothing to delete here since the pending secret is now active and still in use.
existingPending := existing.GetStream().GetPendingSecretID()
newPending := secret.GetStream().GetPendingSecretID()
if existingPending != "" &&
// The value of newPending indicates whether a peering is being generated/re-established (not empty)
// or whether a pending secret is being promoted (empty).
newPending != "" &&
newPending != existingPending {
toDelete = append(toDelete, existingPending)
}
case *pbpeering.SecretsWriteRequest_Establish:
// This should never happen. Dialing peers are the only ones that can call Establish,
// and the peering secrets for dialing peers should have been inserted earlier in the function.
return fmt.Errorf("an accepting peer should not have called Establish RPC")
default:
return fmt.Errorf("got unexpected request type: %T", req.Request)
}
for _, id := range toDelete {
if err := tx.Delete(tablePeeringSecretUUIDs, id); err != nil {
@ -298,23 +357,23 @@ func (s *Store) peeringSecretsWriteTxn(tx WriteTxn, secret *pbpeering.PeeringSec
}
}
if err := tx.Insert(tablePeeringSecrets, secret); err != nil {
if err := tx.Insert(tablePeeringSecrets, &secrets); err != nil {
return fmt.Errorf("failed inserting peering: %w", err)
}
return nil
}
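The switch above effectively enforces the accepting peer's secret lifecycle: generate a token, exchange its establishment secret for a pending stream secret, then promote the pending secret on first use. A hedged sketch of driving that lifecycle through `PeeringSecretsWrite`, assuming the peering row already exists and using illustrative raft indexes:

```go
package example

import (
	"github.com/hashicorp/consul/agent/consul/state"
	"github.com/hashicorp/consul/proto/pbpeering"
)

// secretLifecycle walks the accepting peer's secret states in order. The
// peering identified by peerID is assumed to have been written already, and
// the secret IDs are assumed to be pre-validated UUIDs.
func secretLifecycle(s *state.Store, peerID, establishment, pending string) error {
	// 1. Generating a peering token persists an establishment secret.
	if err := s.PeeringSecretsWrite(1, &pbpeering.SecretsWriteRequest{
		PeerID: peerID,
		Request: &pbpeering.SecretsWriteRequest_GenerateToken{
			GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
				EstablishmentSecret: establishment,
			},
		},
	}); err != nil {
		return err
	}

	// 2. Exchanging that secret invalidates it and stores a pending stream secret.
	if err := s.PeeringSecretsWrite(2, &pbpeering.SecretsWriteRequest{
		PeerID: peerID,
		Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
			ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
				EstablishmentSecret: establishment,
				PendingStreamSecret: pending,
			},
		},
	}); err != nil {
		return err
	}

	// 3. First use of the pending secret promotes it to the active stream secret.
	return s.PeeringSecretsWrite(3, &pbpeering.SecretsWriteRequest{
		PeerID: peerID,
		Request: &pbpeering.SecretsWriteRequest_PromotePending{
			PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
				ActiveStreamSecret: pending,
			},
		},
	})
}
```

Dialing peers never take this path; they persist only the active stream secret via the `Establish` request handled near the top of the function.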
func (s *Store) PeeringSecretsDelete(idx uint64, peerID string) error {
func (s *Store) PeeringSecretsDelete(idx uint64, peerID string, dialer bool) error {
tx := s.db.WriteTxn(idx)
defer tx.Abort()
if err := peeringSecretsDeleteTxn(tx, peerID); err != nil {
if err := peeringSecretsDeleteTxn(tx, peerID, dialer); err != nil {
return fmt.Errorf("failed to write peering secret: %w", err)
}
return tx.Commit()
}
func peeringSecretsDeleteTxn(tx WriteTxn, peerID string) error {
func peeringSecretsDeleteTxn(tx WriteTxn, peerID string, dialer bool) error {
secretRaw, err := tx.First(tablePeeringSecrets, indexID, peerID)
if err != nil {
return fmt.Errorf("failed to fetch secret for peering: %w", err)
@ -326,6 +385,11 @@ func peeringSecretsDeleteTxn(tx WriteTxn, peerID string) error {
return fmt.Errorf("failed to delete secret for peering: %w", err)
}
// Dialing peers do not track secrets in tablePeeringSecretUUIDs.
if dialer {
return nil
}
secrets, ok := secretRaw.(*pbpeering.PeeringSecrets)
if !ok {
return fmt.Errorf("invalid type %T", secretRaw)
@ -520,7 +584,7 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
// Ensure associated secrets are cleaned up when a peering is marked for deletion.
if req.Peering.State == pbpeering.PeeringState_DELETING {
if err := peeringSecretsDeleteTxn(tx, req.Peering.ID); err != nil {
if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil {
return fmt.Errorf("failed to delete peering secrets: %w", err)
}
}
@ -532,7 +596,7 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
}
// Write any secrets generated with the peering.
err = s.peeringSecretsWriteTxn(tx, req.GetSecret())
err = s.peeringSecretsWriteTxn(tx, req.GetSecretsRequest())
if err != nil {
return fmt.Errorf("failed to write peering establishment secret: %w", err)
}
@ -918,7 +982,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en
if idx > maxIdx {
maxIdx = idx
}
if peering == nil || !peering.IsActive() {
if !peering.IsActive() {
continue
}
peerings = append(peerings, peering)
@ -1097,6 +1161,10 @@ func (s *Snapshot) PeeringTrustBundles() (memdb.ResultIterator, error) {
return s.tx.Get(tablePeeringTrustBundles, indexID)
}
func (s *Snapshot) PeeringSecrets() (memdb.ResultIterator, error) {
return s.tx.Get(tablePeeringSecrets, indexID)
}
func (r *Restore) Peering(p *pbpeering.Peering) error {
if err := r.tx.Insert(tablePeering, p); err != nil {
return fmt.Errorf("failed restoring peering: %w", err)
@ -1119,6 +1187,30 @@ func (r *Restore) PeeringTrustBundle(ptb *pbpeering.PeeringTrustBundle) error {
return nil
}
func (r *Restore) PeeringSecrets(p *pbpeering.PeeringSecrets) error {
if err := r.tx.Insert(tablePeeringSecrets, p); err != nil {
return fmt.Errorf("failed restoring peering secrets: %w", err)
}
var uuids []string
if establishment := p.GetEstablishment().GetSecretID(); establishment != "" {
uuids = append(uuids, establishment)
}
if pending := p.GetStream().GetPendingSecretID(); pending != "" {
uuids = append(uuids, pending)
}
if active := p.GetStream().GetActiveSecretID(); active != "" {
uuids = append(uuids, active)
}
for _, id := range uuids {
if err := r.tx.Insert(tablePeeringSecretUUIDs, id); err != nil {
return fmt.Errorf("failed restoring peering secret UUIDs: %w", err)
}
}
return nil
}
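The new snapshot and restore hooks let peering secrets survive a Raft snapshot, with the restore side re-inserting each secret's UUIDs so uniqueness checks keep working afterwards. A hedged sketch of the round trip, assuming the state store's usual `Snapshot`/`Restore` helpers (`Snapshot`, `Close`, `Restore`, `Abort`, `Commit`):

```go
package example

import (
	"github.com/hashicorp/consul/agent/consul/state"
	"github.com/hashicorp/consul/proto/pbpeering"
)

// copyPeeringSecrets round-trips peering secrets from one store to another,
// re-populating the secret-UUID table on the restoring side.
func copyPeeringSecrets(src, dst *state.Store) error {
	snap := src.Snapshot()
	defer snap.Close()

	restore := dst.Restore()
	defer restore.Abort()

	iter, err := snap.PeeringSecrets()
	if err != nil {
		return err
	}
	for entry := iter.Next(); entry != nil; entry = iter.Next() {
		if err := restore.PeeringSecrets(entry.(*pbpeering.PeeringSecrets)); err != nil {
			return err
		}
	}
	restore.Commit()
	return nil
}
```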
// peersForServiceTxn returns the names of all peers that a service is exported to.
func peersForServiceTxn(
tx ReadTxn,


@ -58,7 +58,7 @@ func insertTestPeerings(t *testing.T, s *Store) {
require.NoError(t, tx.Commit())
}
func insertTestPeeringSecret(t *testing.T, s *Store, secret *pbpeering.PeeringSecrets) {
func insertTestPeeringSecret(t *testing.T, s *Store, secret *pbpeering.PeeringSecrets, dialer bool) {
t.Helper()
tx := s.db.WriteTxn(0)
@ -78,9 +78,12 @@ func insertTestPeeringSecret(t *testing.T, s *Store, secret *pbpeering.PeeringSe
uuids = append(uuids, active)
}
for _, id := range uuids {
err = tx.Insert(tablePeeringSecretUUIDs, id)
require.NoError(t, err)
// Dialing peers do not track secret UUIDs because they don't generate them.
if !dialer {
for _, id := range uuids {
err = tx.Insert(tablePeeringSecretUUIDs, id)
require.NoError(t, err)
}
}
require.NoError(t, tx.Commit())
@ -182,7 +185,7 @@ func TestStateStore_PeeringSecretsRead(t *testing.T) {
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testFooSecretID,
},
})
}, false)
type testcase struct {
name string
@ -233,24 +236,45 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
return resp
}
writeSeed := func(s *Store, req *pbpeering.PeeringWriteRequest) {
var (
testSecretOne = testUUID()
testSecretTwo = testUUID()
testSecretThree = testUUID()
testSecretFour = testUUID()
)
type testSeed struct {
peering *pbpeering.Peering
secrets *pbpeering.PeeringSecrets
}
type testcase struct {
name string
seed *testSeed
input *pbpeering.SecretsWriteRequest
expect *pbpeering.PeeringSecrets
expectUUIDs []string
expectErr string
}
writeSeed := func(s *Store, seed *testSeed) {
tx := s.db.WriteTxn(1)
defer tx.Abort()
if req.Peering != nil {
require.NoError(t, tx.Insert(tablePeering, req.Peering))
if seed.peering != nil {
require.NoError(t, tx.Insert(tablePeering, seed.peering))
}
if req.Secret != nil {
require.NoError(t, tx.Insert(tablePeeringSecrets, req.Secret))
if seed.secrets != nil {
require.NoError(t, tx.Insert(tablePeeringSecrets, seed.secrets))
var toInsert []string
if establishment := req.Secret.GetEstablishment().GetSecretID(); establishment != "" {
if establishment := seed.secrets.GetEstablishment().GetSecretID(); establishment != "" {
toInsert = append(toInsert, establishment)
}
if pending := req.Secret.GetStream().GetPendingSecretID(); pending != "" {
if pending := seed.secrets.GetStream().GetPendingSecretID(); pending != "" {
toInsert = append(toInsert, pending)
}
if active := req.Secret.GetStream().GetActiveSecretID(); active != "" {
if active := seed.secrets.GetStream().GetActiveSecretID(); active != "" {
toInsert = append(toInsert, active)
}
for _, id := range toInsert {
@ -261,20 +285,6 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
tx.Commit()
}
var (
testSecretOne = testUUID()
testSecretTwo = testUUID()
testSecretThree = testUUID()
)
type testcase struct {
name string
seed *pbpeering.PeeringWriteRequest
input *pbpeering.PeeringSecrets
expect *pbpeering.PeeringSecrets
expectUUIDs []string
expectErr string
}
run := func(t *testing.T, tc testcase) {
s := NewStateStore(nil)
@ -291,7 +301,7 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
require.NoError(t, err)
// Validate that we read what we expect
secrets, err := s.PeeringSecretsRead(nil, tc.input.PeerID)
secrets, err := s.PeeringSecretsRead(nil, tc.input.GetPeerID())
require.NoError(t, err)
require.NotNil(t, secrets)
prototest.AssertDeepEqual(t, tc.expect, secrets)
@ -301,40 +311,131 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
}
tcs := []testcase{
{
name: "missing peer id",
input: &pbpeering.PeeringSecrets{},
name: "missing peer id",
input: &pbpeering.SecretsWriteRequest{
Request: &pbpeering.SecretsWriteRequest_GenerateToken{},
},
expectErr: "missing peer ID",
},
{
name: "no secret IDs were embedded",
input: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
},
expectErr: "no secret IDs were embedded",
},
{
name: "unknown peer id",
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testFooSecretID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testFooSecretID,
},
},
},
expectErr: "unknown peering",
},
{
name: "dialing peer does not track UUIDs",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "no secret IDs were embedded when generating token",
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{},
},
expectErr: "missing secret ID",
},
{
name: "no secret IDs were embedded when establishing peering",
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_Establish{},
},
expectErr: "missing secret ID",
},
{
name: "no secret IDs were embedded when exchanging secret",
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{},
},
expectErr: "missing secret ID",
},
{
name: "no secret IDs were embedded when promoting pending secret",
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_PromotePending{},
},
expectErr: "missing secret ID",
},
{
name: "dialing peer invalid request type - generate token",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
PeerServerAddresses: []string{"10.0.0.1:5300"},
},
},
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testFooSecretID,
// Dialing peer must only write secrets from Establish
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testFooSecretID,
},
},
},
expectErr: "invalid request type",
},
{
name: "dialing peer invalid request type - exchange secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
PeerServerAddresses: []string{"10.0.0.1:5300"},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
// Dialing peer must only write secrets from Establish
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
PendingStreamSecret: testFooSecretID,
},
},
},
expectErr: "invalid request type",
},
{
name: "dialing peer invalid request type - promote pending",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
PeerServerAddresses: []string{"10.0.0.1:5300"},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
// Dialing peer must only write secrets from Establish
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
ActiveStreamSecret: testFooSecretID,
},
},
},
expectErr: "invalid request type",
},
{
name: "dialing peer does not track UUIDs",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
PeerServerAddresses: []string{"10.0.0.1:5300"},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_Establish{
Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
ActiveStreamSecret: testFooSecretID,
},
},
},
expect: &pbpeering.PeeringSecrets{
@ -347,13 +448,13 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
expectUUIDs: []string{},
},
{
name: "generate new establishment secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "generate new establishment secret when secrets already existed",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
Secret: &pbpeering.PeeringSecrets{
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testSecretOne,
@ -361,10 +462,12 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
},
},
},
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testSecretThree,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testSecretThree,
},
},
},
expect: &pbpeering.PeeringSecrets{
@ -381,24 +484,26 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
expectUUIDs: []string{testSecretOne, testSecretTwo, testSecretThree},
},
{
name: "replace establishment secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "generate new token to replace establishment secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
Secret: &pbpeering.PeeringSecrets{
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testSecretOne,
},
},
},
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
// Two replaces One
SecretID: testSecretTwo,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
// Two replaces One
EstablishmentSecret: testSecretTwo,
},
},
},
expect: &pbpeering.PeeringSecrets{
@ -410,46 +515,96 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
expectUUIDs: []string{testSecretTwo},
},
{
name: "generate new pending secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "cannot exchange secret without existing secrets",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
// Do not seed an establishment secret.
},
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testSecretOne,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
PendingStreamSecret: testSecretOne,
},
},
},
expect: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testSecretOne,
},
},
expectUUIDs: []string{testSecretOne},
expectErr: "no known secrets for peering",
},
{
name: "replace pending secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "cannot exchange secret without establishment secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
Secret: &pbpeering.PeeringSecrets{
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testSecretOne,
},
},
},
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
// Two replaces One
PendingSecretID: testSecretTwo,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
// Attempt to replace One with Two
PendingStreamSecret: testSecretTwo,
},
},
},
expectErr: "peering was already established",
},
{
name: "cannot exchange secret without valid establishment secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testSecretOne,
},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
// Given secret Three does not match One
EstablishmentSecret: testSecretThree,
PendingStreamSecret: testSecretTwo,
},
},
},
expectErr: "invalid establishment secret",
},
{
name: "exchange secret to generate new pending secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testSecretOne,
},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
EstablishmentSecret: testSecretOne,
PendingStreamSecret: testSecretTwo,
},
},
},
expect: &pbpeering.PeeringSecrets{
@ -458,16 +613,101 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
PendingSecretID: testSecretTwo,
},
},
// Establishment secret testSecretOne is discarded when exchanging for a stream secret
expectUUIDs: []string{testSecretTwo},
},
{
name: "promote pending secret and delete active",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "exchange secret replaces pending stream secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
Secret: &pbpeering.PeeringSecrets{
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testSecretFour,
},
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testSecretOne,
PendingSecretID: testSecretTwo,
},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
EstablishmentSecret: testSecretFour,
// Three replaces Two
PendingStreamSecret: testSecretThree,
},
},
},
expect: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
// Establishment secret is discarded in favor of new pending secret.
Stream: &pbpeering.PeeringSecrets_Stream{
// Active secret is not deleted until the new pending secret is promoted
ActiveSecretID: testSecretOne,
PendingSecretID: testSecretThree,
},
},
expectUUIDs: []string{testSecretOne, testSecretThree},
},
{
name: "cannot promote pending without existing secrets",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
// Do not seed a pending secret.
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
ActiveStreamSecret: testSecretOne,
},
},
},
expectErr: "no known secrets for peering",
},
{
name: "cannot promote pending without existing pending secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testSecretOne,
},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
// Attempt to replace One with Two
ActiveStreamSecret: testSecretTwo,
},
},
},
expectErr: "invalid pending stream secret",
},
{
name: "cannot promote pending without valid pending secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testSecretTwo,
@ -475,20 +715,55 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
},
},
},
input: &pbpeering.PeeringSecrets{
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
// Two gets promoted over One
ActiveSecretID: testSecretTwo,
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
// Attempting to write secret Three, but pending secret is Two
ActiveStreamSecret: testSecretThree,
},
},
},
expectErr: "invalid pending stream secret",
},
{
name: "promote pending secret and delete active",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testFooPeerID,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testSecretThree,
},
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testSecretTwo,
ActiveSecretID: testSecretOne,
},
},
},
input: &pbpeering.SecretsWriteRequest{
PeerID: testFooPeerID,
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
// Two gets promoted over One
ActiveStreamSecret: testSecretTwo,
},
},
},
expect: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
// Establishment secret remains valid when promoting a stream secret.
SecretID: testSecretThree,
},
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testSecretTwo,
},
},
expectUUIDs: []string{testSecretTwo},
expectUUIDs: []string{testSecretTwo, testSecretThree},
},
}
for _, tc := range tcs {
@ -499,40 +774,67 @@ func TestStore_PeeringSecretsWrite(t *testing.T) {
}
func TestStore_PeeringSecretsDelete(t *testing.T) {
s := NewStateStore(nil)
insertTestPeerings(t, s)
const (
establishmentID = "b4b9cbae-4bbd-454b-b7ae-441a5c89c3b9"
pendingID = "0ba06390-bd77-4c52-8397-f88c0867157d"
activeID = "0b8a3817-aca0-4c06-94b6-b0763a5cd013"
)
insertTestPeeringSecret(t, s, &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: establishmentID,
},
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: pendingID,
ActiveSecretID: activeID,
},
})
type testCase struct {
dialer bool
secret *pbpeering.PeeringSecrets
}
require.NoError(t, s.PeeringSecretsDelete(12, testFooPeerID))
run := func(t *testing.T, tc testCase) {
s := NewStateStore(nil)
// The secrets should be gone
secrets, err := s.PeeringSecretsRead(nil, testFooPeerID)
require.NoError(t, err)
require.Nil(t, secrets)
insertTestPeerings(t, s)
insertTestPeeringSecret(t, s, tc.secret, tc.dialer)
// The UUIDs should be free
uuids := []string{establishmentID, pendingID, activeID}
require.NoError(t, s.PeeringSecretsDelete(12, testFooPeerID, tc.dialer))
for _, id := range uuids {
free, err := s.ValidateProposedPeeringSecretUUID(id)
// The secrets should be gone
secrets, err := s.PeeringSecretsRead(nil, testFooPeerID)
require.NoError(t, err)
require.True(t, free)
require.Nil(t, secrets)
uuids := []string{establishmentID, pendingID, activeID}
for _, id := range uuids {
free, err := s.ValidateProposedPeeringSecretUUID(id)
require.NoError(t, err)
require.True(t, free)
}
}
tt := map[string]testCase{
"acceptor": {
dialer: false,
secret: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: establishmentID,
},
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: pendingID,
ActiveSecretID: activeID,
},
},
},
"dialer": {
dialer: true,
secret: &pbpeering.PeeringSecrets{
PeerID: testFooPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: activeID,
},
},
},
}
for name, tc := range tt {
t.Run(name, func(t *testing.T) {
run(t, tc)
})
}
}
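The rewritten test covers both acceptor and dialer deletions. As a usage note, callers now pass the dialer flag explicitly instead of the state store inferring it; a small hedged sketch of the new call shape, assuming the in-repo `state` and `pbpeering` packages:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/consul/agent/consul/state"
	"github.com/hashicorp/consul/proto/pbpeering"
)

// cleanupSecrets deletes a peering's secrets, telling the state store whether
// the peer dials out so UUID bookkeeping can be skipped for dialing peers.
func cleanupSecrets(s *state.Store, idx uint64, p *pbpeering.Peering) error {
	if err := s.PeeringSecretsDelete(idx, p.ID, p.ShouldDial()); err != nil {
		return fmt.Errorf("failed to delete peering secrets: %w", err)
	}
	return nil
}
```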
@ -847,10 +1149,12 @@ func TestStore_PeeringWrite(t *testing.T) {
Name: "baz",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
Secret: &pbpeering.PeeringSecrets{
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testBazSecretID,
},
},
},
},
@ -1157,7 +1461,13 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
}
newTarget := func(service, serviceSubset, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, "default", "default", datacenter)
t := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: service,
ServiceSubset: serviceSubset,
Partition: "default",
Namespace: "default",
Datacenter: datacenter,
})
t.SNI = connect.TargetSNI(t, connect.TestTrustDomain)
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default
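This refactor swaps the positional-argument constructor for an options struct, which keeps call sites readable as optional and enterprise-only fields accumulate. A minimal sketch of the new call shape, assuming `NewDiscoveryTarget` still returns a `*structs.DiscoveryTarget` as the surrounding assignments imply:

```go
package example

import "github.com/hashicorp/consul/agent/structs"

// newWebTarget shows the options-struct form of NewDiscoveryTarget used
// above; unset fields such as ServiceSubset are simply left empty.
func newWebTarget(datacenter string) *structs.DiscoveryTarget {
	return structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
		Service:    "web",
		Partition:  "default",
		Namespace:  "default",
		Datacenter: datacenter,
	})
}
```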


@ -146,13 +146,13 @@ func testRegisterServiceOpts(t *testing.T, s *Store, idx uint64, nodeID, service
// testRegisterServiceWithChange registers a service and allows ensuring the consul index is updated
// even if the service already exists, when using `modifyAccordingIndex`.
// This is done by setting the transaction ID in the "version" meta so the service will be updated if it already exists.
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) *structs.NodeService {
return testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
}
// testRegisterServiceWithChangeOpts is the same as testRegisterServiceWithChange with the addition of opts that can
// modify the service prior to writing.
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) {
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) *structs.NodeService {
meta := make(map[string]string)
if modifyAccordingIndex {
meta["version"] = fmt.Sprint(idx)
@ -183,14 +183,21 @@ func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeI
result.ServiceID != serviceID {
t.Fatalf("bad service: %#v", result)
}
return svc
}
// testRegisterService registers a service with the given transaction idx.
// If the service already exists, the transaction number might not be increased.
// Use `testRegisterServiceWithChange()` if you want to perform a registration that
// ensures the transaction is updated by setting idx in the Meta of the Service.
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) *structs.NodeService {
return testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
}
func testRegisterConnectService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, true, func(service *structs.NodeService) {
service.Connect = structs.ServiceConnect{Native: true}
})
}
func testRegisterIngressService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {

File diff suppressed because it is too large


@ -27,8 +27,17 @@ func TestDiscoveryChainRead(t *testing.T) {
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default
@ -99,7 +108,7 @@ func TestDiscoveryChainRead(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
},
}
require.Equal(t, expect, value.Chain)
@ -144,7 +153,7 @@ func TestDiscoveryChainRead(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"),
"web.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
},
}
require.Equal(t, expect, value.Chain)
@ -198,7 +207,7 @@ func TestDiscoveryChainRead(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
},
}
require.Equal(t, expect, value.Chain)
@ -264,11 +273,11 @@ func TestDiscoveryChainRead(t *testing.T) {
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc1"),
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
33*time.Second,
),
"web.default.default.dc2": targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc2"),
newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
33*time.Second,
),
},
@ -280,7 +289,7 @@ func TestDiscoveryChainRead(t *testing.T) {
}))
expectTarget_DC1 := targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc1"),
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
22*time.Second,
)
expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{
@ -288,7 +297,7 @@ func TestDiscoveryChainRead(t *testing.T) {
}
expectTarget_DC2 := targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc2"),
newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
22*time.Second,
)
expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{


@ -1,12 +1,13 @@
package external
import (
"time"
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"time"
agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
"github.com/hashicorp/consul/tlsutil"
@ -34,7 +35,7 @@ func NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S
MinTime: 15 * time.Second,
}),
}
if tls != nil && tls.GRPCTLSConfigured() {
if tls != nil && tls.GRPCServerUseTLS() {
creds := credentials.NewTLS(tls.IncomingGRPCConfig())
opts = append(opts, grpc.Creds(creds))
}


@ -52,13 +52,21 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G
}
// Build out the response
var serviceName string
if svc.ServiceKind == structs.ServiceKindConnectProxy {
serviceName = svc.ServiceProxy.DestinationServiceName
} else {
serviceName = svc.ServiceName
}
resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{
Service: svc.ServiceProxy.DestinationServiceName,
Service: serviceName,
Partition: svc.EnterpriseMeta.PartitionOrDefault(),
Namespace: svc.EnterpriseMeta.NamespaceOrDefault(),
Datacenter: s.Datacenter,
ServiceKind: convertToResponseServiceKind(svc.ServiceKind),
NodeName: svc.Node,
NodeId: string(svc.ID),
}
bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config)


@ -97,14 +97,20 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
resp, err := client.GetEnvoyBootstrapParams(ctx, req)
require.NoError(t, err)
require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
if tc.registerReq.Service.IsGateway() {
require.Equal(t, tc.registerReq.Service.Service, resp.Service)
} else {
require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
}
require.Equal(t, serverDC, resp.Datacenter)
require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition)
require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace)
require.Contains(t, resp.Config.Fields, proxyConfigKey)
require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey])
require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind)
require.Equal(t, tc.registerReq.Node, resp.NodeName)
require.Equal(t, string(tc.registerReq.ID), resp.NodeId)
}
testCases := []testCase{


@ -26,11 +26,12 @@ const (
type Server struct {
Config
Tracker *Tracker
}
type Config struct {
Backend Backend
Tracker *Tracker
GetStore func() StateStore
Logger hclog.Logger
ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)
@ -53,7 +54,6 @@ type ACLResolver interface {
func NewServer(cfg Config) *Server {
requireNotNil(cfg.Backend, "Backend")
requireNotNil(cfg.Tracker, "Tracker")
requireNotNil(cfg.GetStore, "GetStore")
requireNotNil(cfg.Logger, "Logger")
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
@ -67,7 +67,8 @@ func NewServer(cfg Config) *Server {
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Server{
Config: cfg,
Config: cfg,
Tracker: NewTracker(cfg.incomingHeartbeatTimeout),
}
}
@ -99,7 +100,7 @@ type Backend interface {
GetLeaderAddress() string
ValidateProposedPeeringSecret(id string) (bool, error)
PeeringSecretsWrite(req *pbpeering.PeeringSecrets) error
PeeringSecretsWrite(req *pbpeering.SecretsWriteRequest) error
PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
CatalogRegister(req *structs.RegisterRequest) error


@ -25,13 +25,17 @@ func TestServer_ExchangeSecret(t *testing.T) {
var secret string
testutil.RunStep(t, "known establishment secret is accepted", func(t *testing.T) {
require.NoError(t, store.PeeringSecretsWrite(1, &pbpeering.PeeringSecrets{
PeerID: testPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{SecretID: testEstablishmentSecretID},
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testActiveStreamSecretID,
// First write the establishment secret so that it can be exchanged
require.NoError(t, store.PeeringSecretsWrite(1, &pbpeering.SecretsWriteRequest{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testEstablishmentSecretID,
},
},
}))
// Exchange the now-valid establishment secret for a stream secret
resp, err := srv.ExchangeSecret(context.Background(), &pbpeerstream.ExchangeSecretRequest{
PeerID: testPeerID,
EstablishmentSecret: testEstablishmentSecretID,
@ -47,8 +51,5 @@ func TestServer_ExchangeSecret(t *testing.T) {
require.NoError(t, err)
require.Equal(t, secret, s.GetStream().GetPendingSecretID())
// Active stream secret persists until pending secret is promoted during peering establishment.
require.Equal(t, testActiveStreamSecretID, s.GetStream().GetActiveSecretID())
})
}


@ -77,20 +77,21 @@ func (s *Server) ExchangeSecret(ctx context.Context, req *pbpeerstream.ExchangeS
return nil, grpcstatus.Errorf(codes.Internal, "failed to generate peering stream secret: %v", err)
}
secrets := &pbpeering.PeeringSecrets{
writeReq := &pbpeering.SecretsWriteRequest{
PeerID: req.PeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
// Overwriting any existing un-utilized pending stream secret.
PendingSecretID: id,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
// Pass the given establishment secret so that it can be re-validated at the state store.
// Validating the establishment secret at the RPC is not enough because there can be
// concurrent callers with the same establishment secret.
EstablishmentSecret: req.EstablishmentSecret,
// If there is an active stream secret ID it is NOT invalidated here.
// It remains active until the pending secret ID is used and promoted to active.
// This allows dialing clusters with the active stream secret to continue to dial successfully until they
// receive the new secret.
ActiveSecretID: existing.GetStream().GetActiveSecretID(),
// Overwrite any existing un-utilized pending stream secret.
PendingStreamSecret: id,
},
},
}
err = s.Backend.PeeringSecretsWrite(secrets)
err = s.Backend.PeeringSecretsWrite(writeReq)
if err != nil {
return nil, grpcstatus.Errorf(codes.Internal, "failed to persist peering secret: %v", err)
}
@ -191,14 +192,13 @@ func (s *Server) StreamResources(stream pbpeerstream.PeerStreamService_StreamRes
}
authorized = true
promoted := &pbpeering.PeeringSecrets{
PeerID: req.PeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: pending,
// The PendingSecretID is intentionally zeroed out since we want to avoid re-triggering this
// promotion process with the same pending secret.
PendingSecretID: "",
promoted := &pbpeering.SecretsWriteRequest{
PeerID: p.ID,
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
// Promote the pending secret to the active stream secret for this peering.
ActiveStreamSecret: pending,
},
},
}
err = s.Backend.PeeringSecretsWrite(promoted)
@ -447,6 +447,8 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// exits. After the method exits this code here won't receive any recv errors and those will be handled
// by DrainStream().
err = fmt.Errorf("stream ended unexpectedly")
} else {
err = fmt.Errorf("unexpected error receiving from the stream: %w", err)
}
status.TrackRecvError(err.Error())
return err
@ -573,6 +575,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
status.TrackRecvResourceSuccess()
}
// Reply with an ACK or NACK depending on whether we successfully processed the response.
if err := streamSend(reply); err != nil {
return fmt.Errorf("failed to send to stream: %v", err)
}
@ -684,10 +687,29 @@ func logTraceProto(logger hclog.Logger, pb proto.Message, received bool) {
dir = "received"
}
// Redact the long-lived stream secret to avoid leaking it in trace logs.
pbToLog := pb
switch msg := pb.(type) {
case *pbpeerstream.ReplicationMessage:
clone := &pbpeerstream.ReplicationMessage{}
proto.Merge(clone, msg)
if clone.GetOpen() != nil {
clone.GetOpen().StreamSecretID = "hidden"
pbToLog = clone
}
case *pbpeerstream.ReplicationMessage_Open:
clone := &pbpeerstream.ReplicationMessage_Open{}
proto.Merge(clone, msg)
clone.StreamSecretID = "hidden"
pbToLog = clone
}
m := jsonpb.Marshaler{
Indent: " ",
}
out, err := m.MarshalToString(pb)
out, err := m.MarshalToString(pbToLog)
if err != nil {
out = "<ERROR: " + err.Error() + ">"
}
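The redaction above clones the message before zeroing the secret so the caller's copy is untouched. A small hedged sketch of that clone-then-redact pattern in isolation, assuming the same `proto.Merge` helper and `pbpeerstream` types imported elsewhere in this change:

```go
package example

import (
	"google.golang.org/protobuf/proto"

	"github.com/hashicorp/consul/proto/pbpeerstream"
)

// redactOpen deep-copies an Open message and hides its stream secret so the
// copy can be logged without leaking the long-lived credential.
func redactOpen(msg *pbpeerstream.ReplicationMessage_Open) *pbpeerstream.ReplicationMessage_Open {
	clone := &pbpeerstream.ReplicationMessage_Open{}
	proto.Merge(clone, msg)         // deep copy; the caller's message keeps its secret
	clone.StreamSecretID = "hidden" // never emit the secret in trace logs
	return clone
}
```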


@ -1,6 +1,7 @@
package peerstream
import (
"bytes"
"context"
"fmt"
"io"
@ -10,13 +11,14 @@ import (
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
newproto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/acl"
@ -26,6 +28,7 @@ import (
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/pbcommon"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
@ -178,9 +181,13 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) {
}
func TestStreamResources_Server_ActiveSecretValidation(t *testing.T) {
type testSeed struct {
peering *pbpeering.Peering
secrets []*pbpeering.SecretsWriteRequest
}
type testCase struct {
name string
seed *pbpeering.PeeringWriteRequest
seed *testSeed
input *pbpeerstream.ReplicationMessage
wantErr error
}
@ -191,7 +198,13 @@ func TestStreamResources_Server_ActiveSecretValidation(t *testing.T) {
srv, store := newTestServer(t, nil)
// Write a seed peering.
require.NoError(t, store.PeeringWrite(1, tc.seed))
if tc.seed != nil {
require.NoError(t, store.PeeringWrite(1, &pbpeering.PeeringWriteRequest{Peering: tc.seed.peering}))
for _, s := range tc.seed.secrets {
require.NoError(t, store.PeeringSecretsWrite(1, s))
}
}
// Set the initial roots and CA configuration.
_, _ = writeInitialRootsAndCA(t, store)
@ -220,12 +233,14 @@ func TestStreamResources_Server_ActiveSecretValidation(t *testing.T) {
} else {
require.NoError(t, err)
}
client.Close()
}
tt := []testCase{
{
name: "no secret for peering",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: peeringWithoutSecrets,
},
@ -241,15 +256,19 @@ func TestStreamResources_Server_ActiveSecretValidation(t *testing.T) {
},
{
name: "unknown secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testPeerID,
},
Secret: &pbpeering.PeeringSecrets{
PeerID: testPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testActiveStreamSecretID,
secrets: []*pbpeering.SecretsWriteRequest{
{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testEstablishmentSecretID,
},
},
},
},
},
@ -264,16 +283,29 @@ func TestStreamResources_Server_ActiveSecretValidation(t *testing.T) {
wantErr: status.Error(codes.PermissionDenied, "invalid peering stream secret"),
},
{
name: "known active secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "known pending secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testPeerID,
},
Secret: &pbpeering.PeeringSecrets{
PeerID: testPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testActiveStreamSecretID,
secrets: []*pbpeering.SecretsWriteRequest{
{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testEstablishmentSecretID,
},
},
},
{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
EstablishmentSecret: testEstablishmentSecretID,
PendingStreamSecret: testPendingStreamSecretID,
},
},
},
},
},
@ -281,22 +313,44 @@ func TestStreamResources_Server_ActiveSecretValidation(t *testing.T) {
Payload: &pbpeerstream.ReplicationMessage_Open_{
Open: &pbpeerstream.ReplicationMessage_Open{
PeerID: testPeerID,
StreamSecretID: testActiveStreamSecretID,
StreamSecretID: testPendingStreamSecretID,
},
},
},
},
{
name: "known pending secret",
seed: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
name: "known active secret",
seed: &testSeed{
peering: &pbpeering.Peering{
Name: "foo",
ID: testPeerID,
},
Secret: &pbpeering.PeeringSecrets{
PeerID: testPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testPendingStreamSecretID,
secrets: []*pbpeering.SecretsWriteRequest{
{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testEstablishmentSecretID,
},
},
},
{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
EstablishmentSecret: testEstablishmentSecretID,
PendingStreamSecret: testPendingStreamSecretID,
},
},
},
{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_PromotePending{
PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
// Pending gets promoted to active.
ActiveStreamSecret: testPendingStreamSecretID,
},
},
},
},
},
@ -445,9 +499,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -498,9 +551,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
// Set the initial roots and CA configuration.
_, rootA := writeInitialRootsAndCA(t, store)
@ -518,7 +570,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
})
})
var lastSendSuccess time.Time
var lastSendAck time.Time
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
ack := &pbpeerstream.ReplicationMessage{
@ -533,19 +585,19 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
},
}
lastSendSuccess = it.FutureNow(1)
lastSendAck = it.FutureNow(1)
err := client.Send(ack)
require.NoError(t, err)
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
}
retry.Run(t, func(r *retry.R) {
status, ok := srv.StreamStatus(testPeerID)
rStatus, ok := srv.StreamStatus(testPeerID)
require.True(r, ok)
require.Equal(r, expect, status)
require.Equal(r, expect, rStatus)
})
})
@ -575,15 +627,15 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
}
retry.Run(t, func(r *retry.R) {
status, ok := srv.StreamStatus(testPeerID)
rStatus, ok := srv.StreamStatus(testPeerID)
require.True(r, ok)
require.Equal(r, expect, status)
require.Equal(r, expect, rStatus)
})
})
@ -640,7 +692,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
LastRecvResourceSuccess: lastRecvResourceSuccess,
@ -699,7 +751,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
LastRecvResourceSuccess: lastRecvResourceSuccess,
@ -731,7 +783,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
LastRecvResourceSuccess: lastRecvResourceSuccess,
@ -762,7 +814,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: false,
DisconnectErrorMessage: lastRecvErrorMsg,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
DisconnectTime: disconnectTime,
@ -1074,9 +1126,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.incomingHeartbeatTimeout = 5 * time.Millisecond
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1122,9 +1174,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) {
outgoingHeartbeatInterval := 5 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1181,9 +1233,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
incomingHeartbeatTimeout := 10 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1387,7 +1439,7 @@ func (b *testStreamBackend) ValidateProposedPeeringSecret(id string) (bool, erro
return true, nil
}
func (b *testStreamBackend) PeeringSecretsWrite(req *pbpeering.PeeringSecrets) error {
func (b *testStreamBackend) PeeringSecretsWrite(req *pbpeering.SecretsWriteRequest) error {
return b.store.PeeringSecretsWrite(1, req)
}
@ -1628,12 +1680,25 @@ func writeTestPeering(t *testing.T, store *state.Store, idx uint64, peerName, re
if remotePeerID != "" {
peering.PeerServerAddresses = []string{"127.0.0.1:5300"}
}
require.NoError(t, store.PeeringWrite(idx, &pbpeering.PeeringWriteRequest{
Peering: &peering,
Secret: &pbpeering.PeeringSecrets{
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: testPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
PendingSecretID: testPendingStreamSecretID,
// Simulate generating a stream secret by first generating a token and then exchanging it for a stream secret.
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testEstablishmentSecretID,
},
},
},
}))
require.NoError(t, store.PeeringSecretsWrite(idx, &pbpeering.SecretsWriteRequest{
PeerID: testPeerID,
Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
EstablishmentSecret: testEstablishmentSecretID,
PendingStreamSecret: testPendingStreamSecretID,
},
},
}))
@ -1657,7 +1722,7 @@ func writeInitialRootsAndCA(t *testing.T, store *state.Store) (string, *structs.
return clusterID, rootA
}
func makeAnyPB(t *testing.T, pb proto.Message) *anypb.Any {
func makeAnyPB(t *testing.T, pb newproto.Message) *anypb.Any {
any, err := anypb.New(pb)
require.NoError(t, err)
return any
@ -2592,6 +2657,51 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) {
}
}
// TestLogTraceProto tests that all PB trace log helpers redact the
// long-lived StreamSecretID.
// We ensure it gets redacted when logging a ReplicationMessage_Open or a ReplicationMessage.
// In the stream handler we only log the ReplicationMessage_Open, but testing both guards against
// a change in that behavior.
func TestLogTraceProto(t *testing.T) {
type testCase struct {
input proto.Message
}
tt := map[string]testCase{
"replication message": {
input: &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Open_{
Open: &pbpeerstream.ReplicationMessage_Open{
StreamSecretID: testPendingStreamSecretID,
},
},
},
},
"open message": {
input: &pbpeerstream.ReplicationMessage_Open{
StreamSecretID: testPendingStreamSecretID,
},
},
}
for name, tc := range tt {
t.Run(name, func(t *testing.T) {
var b bytes.Buffer
logger, err := logging.Setup(logging.Config{
LogLevel: "TRACE",
}, &b)
require.NoError(t, err)
logTraceRecv(logger, tc.input)
logTraceSend(logger, tc.input)
logTraceProto(logger, tc.input, false)
body, err := io.ReadAll(&b)
require.NoError(t, err)
require.NotContains(t, string(body), testPendingStreamSecretID)
})
}
}
func requireEqualInstances(t *testing.T, expect, got structs.CheckServiceNodes) {
t.Helper()
@ -2634,7 +2744,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
store: store,
pub: publisher,
},
Tracker: NewTracker(),
GetStore: func() StateStore { return store },
Logger: testutil.Logger(t),
Datacenter: "dc1",

View File

@ -14,18 +14,27 @@ type Tracker struct {
mu sync.RWMutex
streams map[string]*MutableStatus
// heartbeatTimeout is the max duration a connection is allowed to be
// disconnected before the stream health is reported as non-healthy
heartbeatTimeout time.Duration
// timeNow is a shim for testing.
timeNow func() time.Time
}
func NewTracker() *Tracker {
func NewTracker(heartbeatTimeout time.Duration) *Tracker {
if heartbeatTimeout == 0 {
heartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Tracker{
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
heartbeatTimeout: heartbeatTimeout,
}
}
func (t *Tracker) SetClock(clock func() time.Time) {
// setClock is used for debugging purposes only.
func (t *Tracker) setClock(clock func() time.Time) {
if clock == nil {
t.timeNow = time.Now
} else {
@ -101,7 +110,9 @@ func (t *Tracker) StreamStatus(id string) (resp Status, found bool) {
s, ok := t.streams[id]
if !ok {
return Status{}, false
return Status{
NeverConnected: true,
}, false
}
return s.GetStatus(), true
}
@ -126,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) {
delete(t.streams, id)
}
// IsHealthy calculates the health of a peering status.
// We define a peering as unhealthy if its status has remained in any of the
// following states for longer than the configured incomingHeartbeatTimeout:
// - If it is disconnected
// - If the last received Nack is newer than last received Ack
// - If the last received error is newer than last received success
//
// If none of these conditions apply, we call the peering healthy.
func (t *Tracker) IsHealthy(s Status) bool {
// If stream is in a disconnected state for longer than the configured
// heartbeat timeout, report as unhealthy.
if !s.DisconnectTime.IsZero() &&
t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout {
return false
}
// If last Nack is after last Ack, it means the peer is unable to
// handle our replication message.
if s.LastNack.After(s.LastAck) &&
t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout {
return false
}
// If last recv error is newer than last recv success, we were unable
// to handle the peer's replication message.
if s.LastRecvError.After(s.LastRecvResourceSuccess) &&
t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout {
return false
}
return true
}
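
How these rules combine is easiest to see with a minimal sketch against a Tracker, using the same helpers the tests below rely on (`fmt` is assumed to be imported; the function name is illustrative):

```go
func sketchTrackerHealth() {
	// Passing zero falls back to defaultIncomingHeartbeatTimeout inside NewTracker.
	tr := NewTracker(0)

	// Connected registers (or re-opens) a MutableStatus for the peer.
	st, err := tr.Connected("my-peer-id")
	if err != nil {
		panic(err)
	}

	// A freshly connected stream has no DisconnectTime, no Nack newer than the
	// last Ack, and no receive error newer than the last success, so none of
	// the unhealthy conditions above apply.
	fmt.Println(tr.IsHealthy(st.GetStatus())) // true
}
```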
type MutableStatus struct {
mu sync.RWMutex
@ -145,6 +189,9 @@ type Status struct {
// Connected is true when there is an open stream for the peer.
Connected bool
// NeverConnected is true for peerings that have never connected, false otherwise.
NeverConnected bool
// DisconnectErrorMessage tracks the error that caused the stream to disconnect non-gracefully.
// If the stream is connected or it disconnected gracefully it will be empty.
DisconnectErrorMessage string
@ -199,7 +246,8 @@ func (s *Status) GetExportedServicesCount() uint64 {
func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
return &MutableStatus{
Status: Status{
Connected: connected,
Connected: connected,
NeverConnected: !connected,
},
timeNow: now,
doneCh: make(chan struct{}),

View File

@ -5,13 +5,117 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
)
const (
aPeerID = "63b60245-c475-426b-b314-4588d210859d"
)
func TestTracker_IsHealthy(t *testing.T) {
type testcase struct {
name string
tracker *Tracker
modifierFunc func(status *MutableStatus)
expectedVal bool
}
tcs := []testcase{
{
name: "disconnect time within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
status.DisconnectTime = time.Now()
},
},
{
name: "disconnect time past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
status.DisconnectTime = time.Now().Add(-1 * time.Minute)
},
},
{
name: "receive error before receive success within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "receive error before receive success past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "nack before ack within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "nack before ack past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "healthy",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
tracker := tc.tracker
st, err := tracker.Connected(aPeerID)
require.NoError(t, err)
require.True(t, st.Connected)
if tc.modifierFunc != nil {
tc.modifierFunc(st)
}
assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus()))
})
}
}
func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
peerID := "63b60245-c475-426b-b314-4588d210859d"
it := incrementalTime{
@ -96,7 +200,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
status, ok := tracker.StreamStatus(peerID)
require.False(t, ok)
require.Zero(t, status)
require.Equal(t, Status{NeverConnected: true}, status)
})
}
@ -108,7 +212,7 @@ func TestTracker_connectedStreams(t *testing.T) {
}
run := func(t *testing.T, tc testCase) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
if tc.setup != nil {
tc.setup(t, tracker)
}

View File

@ -124,15 +124,21 @@ func (c *cacheProxyDataSource[ReqType]) Notify(
func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback {
return func(ctx context.Context, e cache.UpdateEvent) {
u := proxycfg.UpdateEvent{
CorrelationID: e.CorrelationID,
Result: e.Result,
Err: e.Err,
}
select {
case ch <- u:
case ch <- newUpdateEvent(e.CorrelationID, e.Result, e.Err):
case <-ctx.Done():
}
}
}
func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent {
// This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError.
if acl.IsErrNotFound(err) {
err = proxycfg.TerminalError(err)
}
return proxycfg.UpdateEvent{
CorrelationID: correlationID,
Result: result,
Err: err,
}
}

View File

@ -54,13 +54,8 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
return func(ctx context.Context, correlationID string, result ResultType, err error) {
event := proxycfg.UpdateEvent{
CorrelationID: correlationID,
Result: result,
Err: err,
}
select {
case ch <- event:
case ch <- newUpdateEvent(correlationID, result, err):
case <-ctx.Done():
}
}

View File

@ -36,14 +36,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
},
},
},
QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token},
}
return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) {
e := proxycfg.UpdateEvent{
CorrelationID: correlationID,
Err: event.Err,
}
if e.Err == nil {
var result any
if event.Err == nil {
rsp, ok := event.Result.(*structs.IndexedIntentionMatches)
if !ok {
return
@ -53,11 +50,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
if len(rsp.Matches) != 0 {
matches = rsp.Matches[0]
}
e.Result = matches
result = matches
}
select {
case ch <- e:
case ch <- newUpdateEvent(correlationID, result, event.Err):
case <-ctx.Done():
}
})
@ -109,10 +106,7 @@ func (s *serverIntentions) Notify(ctx context.Context, req *structs.ServiceSpeci
sort.Sort(structs.IntentionPrecedenceSorter(intentions))
return proxycfg.UpdateEvent{
CorrelationID: correlationID,
Result: intentions,
}, true
return newUpdateEvent(correlationID, intentions, nil), true
}
for subjectIdx, subject := range subjects {

View File

@ -125,7 +125,7 @@ func (m *ConfigSource) startSync(closeCh <-chan chan struct{}, proxyID proxycfg.
case ns == nil:
m.Manager.Deregister(proxyID, source)
logger.Trace("service does not exist in catalog, de-registering it with proxycfg manager")
return nil, err
return ws, nil
case !ns.Kind.IsProxy():
err := errors.New("service must be a sidecar proxy or gateway")
logger.Error(err.Error())

View File

@ -2,6 +2,7 @@ package proxycfg
import (
"context"
"errors"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs"
@ -15,6 +16,28 @@ type UpdateEvent struct {
Err error
}
// TerminalError wraps the given error to indicate that the data source is in
// an irrecoverably broken state (e.g. because the given ACL token has been
// deleted).
//
// Setting UpdateEvent.Err to a TerminalError causes all watches to be canceled
// which, in turn, terminates the xDS streams.
func TerminalError(err error) error {
return terminalError{err}
}
// IsTerminalError returns whether the given error indicates that the data
// source is in an irrecoverably broken state so watches should be torn down
// and retried at a higher level.
func IsTerminalError(err error) bool {
return errors.As(err, &terminalError{})
}
type terminalError struct{ err error }
func (e terminalError) Error() string { return e.err.Error() }
func (e terminalError) Unwrap() error { return e.err }
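
A rough sketch of the intended flow, assuming the `acl` package is imported and with illustrative helper names (the production wiring lives in the data-source adapters elsewhere in this diff):

```go
// Producer side: wrap errors that no amount of retrying can fix.
func buildEvent(correlationID string, result any, err error) UpdateEvent {
	if acl.IsErrNotFound(err) {
		// The ACL token backing this watch has been deleted.
		err = TerminalError(err)
	}
	return UpdateEvent{CorrelationID: correlationID, Result: result, Err: err}
}

// Consumer side: tear the watches down instead of retrying in place.
func shouldTearDown(e UpdateEvent) bool {
	return IsTerminalError(e.Err)
}
```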
// DataSources contains the dependencies used to consume data used to configure
// proxies.
type DataSources struct {

View File

@ -127,7 +127,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
}
// We are updating the proxy, close its old state
state.Close()
state.Close(false)
}
// TODO: move to a function that translates ManagerConfig->stateConfig
@ -148,14 +148,13 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
return err
}
ch, err := state.Watch()
if err != nil {
if _, err = state.Watch(); err != nil {
return err
}
m.proxies[id] = state
// Start a goroutine that will wait for changes and broadcast them to watchers.
go m.notifyBroadcast(ch)
go m.notifyBroadcast(id, state)
return nil
}
@ -175,8 +174,8 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
}
// Closing state will let the goroutine we started in Register finish since
// watch chan is closed.
state.Close()
// watch chan is closed
state.Close(false)
delete(m.proxies, id)
// We intentionally leave potential watchers hanging here - there is no new
@ -186,11 +185,17 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
// cleaned up naturally.
}
func (m *Manager) notifyBroadcast(ch <-chan ConfigSnapshot) {
// Run until ch is closed
for snap := range ch {
func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) {
// Run until ch is closed (by a defer in state.run).
for snap := range state.snapCh {
m.notify(&snap)
}
// If state.run exited because of an irrecoverable error, close all of the
// watchers so that the consumers reconnect/retry at a higher level.
if state.failed() {
m.closeAllWatchers(proxyID)
}
}
func (m *Manager) notify(snap *ConfigSnapshot) {
@ -281,6 +286,20 @@ func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) {
}
}
func (m *Manager) closeAllWatchers(proxyID ProxyID) {
m.mu.Lock()
defer m.mu.Unlock()
watchers, ok := m.watchers[proxyID]
if !ok {
return
}
for watchID := range watchers {
m.closeWatchLocked(proxyID, watchID)
}
}
// closeWatchLocked cleans up state related to a single watcher. It assumes the
// lock is held.
func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) {
@ -309,7 +328,7 @@ func (m *Manager) Close() error {
// Then close all states
for proxyID, state := range m.proxies {
state.Close()
state.Close(false)
delete(m.proxies, proxyID)
}
return nil

View File

@ -63,22 +63,29 @@ func NewUpstreamIDFromServiceID(sid structs.ServiceID) UpstreamID {
return id
}
// TODO(peering): confirm we don't need peername here
func NewUpstreamIDFromTargetID(tid string) UpstreamID {
// Drop the leading subset if one is present in the target ID.
separators := strings.Count(tid, ".")
if separators > 3 {
prefix := tid[:strings.Index(tid, ".")+1]
tid = strings.TrimPrefix(tid, prefix)
var id UpstreamID
split := strings.Split(tid, ".")
switch {
case split[len(split)-2] == "external":
id = UpstreamID{
Name: split[0],
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
Peer: split[4],
}
case len(split) == 5:
// Drop the leading subset if one is present in the target ID.
split = split[1:]
fallthrough
default:
id = UpstreamID{
Name: split[0],
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
Datacenter: split[3],
}
}
split := strings.SplitN(tid, ".", 4)
id := UpstreamID{
Name: split[0],
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
Datacenter: split[3],
}
id.normalize()
return id
}
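
For reference, a small sketch of the two target-ID shapes the function above distinguishes; the peered input matches the test case added later in this diff, the datacenter input is illustrative:

```go
func sketchTargetIDs() {
	// Peered target: "<service>.<namespace>.<partition>.external.<peer>"
	peered := NewUpstreamIDFromTargetID("foo.default.default.external.cluster-01")
	_ = peered // Name: "foo", Peer: "cluster-01"

	// Datacenter target with a leading subset, which is dropped:
	// "<subset>.<service>.<namespace>.<partition>.<datacenter>"
	local := NewUpstreamIDFromTargetID("v1.web.default.default.dc2")
	_ = local // Name: "web", Datacenter: "dc2"
}
```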

View File

@ -35,6 +35,13 @@ func TestUpstreamIDFromTargetID(t *testing.T) {
Datacenter: "dc2",
},
},
"peered": {
tid: "foo.default.default.external.cluster-01",
expect: UpstreamID{
Name: "foo",
Peer: "cluster-01",
},
},
}
for name, tc := range cases {

View File

@ -6,6 +6,7 @@ import (
"fmt"
"net"
"reflect"
"sync/atomic"
"time"
"github.com/hashicorp/go-hclog"
@ -70,11 +71,21 @@ type state struct {
// in Watch.
cancel func()
// failedFlag is (atomically) set to 1 (by Close) when run exits because a data
// source is in an irrecoverable state. It can be read with failed.
failedFlag int32
ch chan UpdateEvent
snapCh chan ConfigSnapshot
reqCh chan chan *ConfigSnapshot
}
// failed returns whether run exited because a data source is in an
// irrecoverable state.
func (s *state) failed() bool {
return atomic.LoadInt32(&s.failedFlag) == 1
}
type DNSConfig struct {
Domain string
AltDomain string
@ -250,10 +261,13 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) {
}
// Close discards the state and stops any long-running watches.
func (s *state) Close() error {
func (s *state) Close(failed bool) error {
if s.cancel != nil {
s.cancel()
}
if failed {
atomic.StoreInt32(&s.failedFlag, 1)
}
return nil
}
@ -300,7 +314,13 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
case <-ctx.Done():
return
case u := <-s.ch:
s.logger.Trace("A blocking query returned; handling snapshot update", "correlationID", u.CorrelationID)
s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID)
if IsTerminalError(u.Err) {
s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID)
s.Close(true)
return
}
if err := s.handler.handleUpdate(ctx, u, snap); err != nil {
s.logger.Error("Failed to handle update from watch",

View File

@ -226,7 +226,8 @@ func (r *retryJoiner) retryJoin() error {
for {
addrs := retryJoinAddrs(disco, r.variant, r.cluster, r.addrs, r.logger)
if len(addrs) > 0 {
n, err := r.join(addrs)
n := 0
n, err = r.join(addrs)
if err == nil {
if r.variant == retryJoinMeshGatewayVariant {
r.logger.Info("Refreshing mesh gateways completed")

View File

@ -8,7 +8,6 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/proto/pbpeerstream"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-multierror"
@ -27,6 +26,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
var (
@ -260,10 +260,12 @@ func (s *Server) GenerateToken(
writeReq := &pbpeering.PeeringWriteRequest{
Peering: peering,
Secret: &pbpeering.PeeringSecrets{
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: peering.ID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: secretID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: secretID,
},
},
},
}
@ -377,6 +379,7 @@ func (s *Server) Establish(
}
var id string
serverAddrs := tok.ServerAddresses
if existing == nil {
id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID)
if err != nil {
@ -384,6 +387,11 @@ func (s *Server) Establish(
}
} else {
id = existing.ID
// If there is a connected stream, assume that the existing ServerAddresses
// are up to date and do not try to overwrite them with the token's addresses.
if status, ok := s.Tracker.StreamStatus(id); ok && status.Connected {
serverAddrs = existing.PeerServerAddresses
}
}
// validate that this peer name is not being used as an acceptor already
@ -395,7 +403,7 @@ func (s *Server) Establish(
ID: id,
Name: req.PeerName,
PeerCAPems: tok.CA,
PeerServerAddresses: tok.ServerAddresses,
PeerServerAddresses: serverAddrs,
PeerServerName: tok.ServerName,
PeerID: tok.PeerID,
Meta: req.Meta,
@ -416,9 +424,9 @@ func (s *Server) Establish(
}
var exchangeResp *pbpeerstream.ExchangeSecretResponse
// Loop through the token's addresses once, attempting to fetch the long-lived stream secret.
// Loop through the known server addresses once, attempting to fetch the long-lived stream secret.
var dialErrors error
for _, addr := range peering.PeerServerAddresses {
for _, addr := range serverAddrs {
exchangeResp, err = exchangeSecret(ctx, addr, tlsOption, &exchangeReq)
if err != nil {
dialErrors = multierror.Append(dialErrors, fmt.Errorf("failed to exchange peering secret with %q: %w", addr, err))
@ -431,18 +439,20 @@ func (s *Server) Establish(
return nil, dialErrors
}
// As soon as a peering is written with a list of ServerAddresses that is
// non-empty, the leader routine will see the peering and attempt to
// establish a connection with the remote peer.
// As soon as a peering is written with a non-empty list of ServerAddresses
// and an active stream secret, a leader routine will see the peering and
// attempt to establish a peering stream with the remote peer.
//
// This peer now has a record of both the LocalPeerID(ID) and
// RemotePeerID(PeerID) but at this point the other peer does not.
writeReq := &pbpeering.PeeringWriteRequest{
Peering: peering,
Secret: &pbpeering.PeeringSecrets{
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: peering.ID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: exchangeResp.StreamSecret,
Request: &pbpeering.SecretsWriteRequest_Establish{
Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
ActiveStreamSecret: exchangeResp.StreamSecret,
},
},
},
}
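
Taken together, the secrets writes now follow a small lifecycle: generate a token, exchange the establishment secret for a pending stream secret, then promote it once a stream opens (the dialer's own record is written with the Establish variant shown just above). A condensed sketch with placeholder indexes, peer ID, and secret values:

```go
func sketchSecretsLifecycle(store *state.Store, peerID string) error {
	// 1. Generating a peering token records an establishment secret.
	if err := store.PeeringSecretsWrite(1, &pbpeering.SecretsWriteRequest{
		PeerID: peerID,
		Request: &pbpeering.SecretsWriteRequest_GenerateToken{
			GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
				EstablishmentSecret: "establishment-secret",
			},
		},
	}); err != nil {
		return err
	}

	// 2. Exchanging the establishment secret records a pending stream secret.
	if err := store.PeeringSecretsWrite(2, &pbpeering.SecretsWriteRequest{
		PeerID: peerID,
		Request: &pbpeering.SecretsWriteRequest_ExchangeSecret{
			ExchangeSecret: &pbpeering.SecretsWriteRequest_ExchangeSecretRequest{
				EstablishmentSecret: "establishment-secret",
				PendingStreamSecret: "pending-secret",
			},
		},
	}); err != nil {
		return err
	}

	// 3. When a stream is opened with the pending secret, it is promoted to active.
	return store.PeeringSecretsWrite(3, &pbpeering.SecretsWriteRequest{
		PeerID: peerID,
		Request: &pbpeering.SecretsWriteRequest_PromotePending{
			PromotePending: &pbpeering.SecretsWriteRequest_PromotePendingRequest{
				ActiveStreamSecret: "pending-secret",
			},
		},
	})
}
```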
@ -716,7 +726,7 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
return nil, err
}
if existing == nil || !existing.IsActive() {
if !existing.IsActive() {
// Return early when the Peering doesn't exist or is already marked for deletion.
// We don't return nil because the pb will fail to marshal.
return &pbpeering.PeeringDeleteResponse{}, nil
@ -729,10 +739,11 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
// We only need to include the name and partition for the peering to be identified.
// All other data associated with the peering can be discarded because once marked
// for deletion the peering is effectively gone.
ID: existing.ID,
Name: req.Name,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now().UTC()),
ID: existing.ID,
Name: req.Name,
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: existing.PeerServerAddresses,
DeletedAt: structs.TimeToProto(time.Now().UTC()),
// PartitionOrEmpty is used to avoid writing "default" in OSS.
Partition: entMeta.PartitionOrEmpty(),

View File

@ -1283,6 +1283,7 @@ func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer {
ports := freeport.GetN(t, 4) // {rpc, serf_lan, serf_wan, grpc}
conf.PeeringEnabled = true
conf.Bootstrap = true
conf.Datacenter = "dc1"
conf.DataDir = dir

View File

@ -114,9 +114,35 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
}
}
if sidecar.Port < 1 {
port, err := a.sidecarPortFromServiceID(sidecar.CompoundServiceID())
if err != nil {
return nil, nil, "", err
}
sidecar.Port = port
}
// Setup checks
checks, err := ns.Connect.SidecarService.CheckTypes()
if err != nil {
return nil, nil, "", err
}
// Setup default check if none given
if len(checks) < 1 {
checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port)
}
return sidecar, checks, token, nil
}
// sidecarPortFromServiceID is used to allocate a unique port for a sidecar proxy.
// This is called immediately before registration to avoid value collisions. This function assumes the state lock is already held.
func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.ServiceID) (int, error) {
sidecarPort := 0
// Allocate port if needed (min and max inclusive).
rangeLen := a.config.ConnectSidecarMaxPort - a.config.ConnectSidecarMinPort + 1
if sidecar.Port < 1 && a.config.ConnectSidecarMinPort > 0 && rangeLen > 0 {
if sidecarPort < 1 && a.config.ConnectSidecarMinPort > 0 && rangeLen > 0 {
// This did pick at random which was simpler but consul reload would assign
// new ports to all the sidecars since it unloads all state and
// re-populates. It also made this more difficult to test (have to pin the
@ -130,11 +156,11 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
// Check if other port is in auto-assign range
if otherNS.Port >= a.config.ConnectSidecarMinPort &&
otherNS.Port <= a.config.ConnectSidecarMaxPort {
if otherNS.CompoundServiceID() == sidecar.CompoundServiceID() {
if otherNS.CompoundServiceID() == sidecarCompoundServiceID {
// This sidecar is already registered with an auto-port and is just
// being updated so pick the same port as before rather than allocate
// a new one.
sidecar.Port = otherNS.Port
sidecarPort = otherNS.Port
break
}
usedPorts[otherNS.Port] = struct{}{}
@ -147,54 +173,48 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
// Check we still need to assign a port and didn't find we already had one
// allocated.
if sidecar.Port < 1 {
if sidecarPort < 1 {
// Iterate until we find lowest unused port
for p := a.config.ConnectSidecarMinPort; p <= a.config.ConnectSidecarMaxPort; p++ {
_, used := usedPorts[p]
if !used {
sidecar.Port = p
sidecarPort = p
break
}
}
}
}
// If no ports left (or auto ports disabled) fail
if sidecar.Port < 1 {
if sidecarPort < 1 {
// If ports are set to zero explicitly, config builder switches them to
// `-1`. In this case don't show the actual values since we don't know what
// was actually in config (zero or negative) and it might be confusing, we
// just know they explicitly disabled auto assignment.
if a.config.ConnectSidecarMinPort < 1 || a.config.ConnectSidecarMaxPort < 1 {
return nil, nil, "", fmt.Errorf("no port provided for sidecar_service " +
return 0, fmt.Errorf("no port provided for sidecar_service " +
"and auto-assignment disabled in config")
}
return nil, nil, "", fmt.Errorf("no port provided for sidecar_service and none "+
return 0, fmt.Errorf("no port provided for sidecar_service and none "+
"left in the configured range [%d, %d]", a.config.ConnectSidecarMinPort,
a.config.ConnectSidecarMaxPort)
}
// Setup checks
checks, err := ns.Connect.SidecarService.CheckTypes()
if err != nil {
return nil, nil, "", err
}
// Setup default check if none given
if len(checks) < 1 {
checks = []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
// Default to localhost rather than agent/service public IP. The checks
// can always be overridden if a non-loopback IP is needed.
TCP: ipaddr.FormatAddressPort(sidecar.Proxy.LocalServiceAddress, sidecar.Port),
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing " + ns.ID,
AliasService: ns.ID,
},
}
}
return sidecar, checks, token, nil
return sidecarPort, nil
}
func sidecarDefaultChecks(serviceID string, localServiceAddress string, port int) []*structs.CheckType {
// Setup default check if none given
return []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
// Default to localhost rather than agent/service public IP. The checks
// can always be overridden if a non-loopback IP is needed.
TCP: ipaddr.FormatAddressPort(localServiceAddress, port),
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing " + serviceID,
AliasService: serviceID,
},
}
}
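
The allocation behaviour above is easiest to see under a one-port range; a test-style sketch mirroring the HCL and helpers used by the tests later in this diff (the test name is illustrative):

```go
func TestSidecarPortSketch(t *testing.T) {
	hcl := `
	ports {
		sidecar_min_port = 2222
		sidecar_max_port = 2222
	}
	`
	a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
	defer a.Shutdown()

	// Nothing else occupies the range, so the lowest free port is returned.
	port, err := a.sidecarPortFromServiceID(structs.ServiceID{ID: "web1-sidecar-proxy"})
	require.NoError(t, err)
	require.Equal(t, 2222, port)

	// Re-registering the same sidecar ID reclaims its existing port; once another
	// service consumes the range, the call fails with
	// "none left in the configured range [2222, 2222]".
}
```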

View File

@ -5,6 +5,8 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/acl"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
@ -16,16 +18,13 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
}
tests := []struct {
name string
maxPort int
preRegister *structs.ServiceDefinition
sd *structs.ServiceDefinition
token string
autoPortsDisabled bool
wantNS *structs.NodeService
wantChecks []*structs.CheckType
wantToken string
wantErr string
name string
sd *structs.ServiceDefinition
token string
wantNS *structs.NodeService
wantChecks []*structs.CheckType
wantToken string
wantErr string
}{
{
name: "no sidecar",
@ -141,42 +140,6 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
},
wantToken: "custom-token",
},
{
name: "no auto ports available",
// register another sidecar consuming our 1 and only allocated auto port.
preRegister: &structs.ServiceDefinition{
Kind: structs.ServiceKindConnectProxy,
Name: "api-proxy-sidecar",
Port: 2222, // Consume the one available auto-port
Proxy: &structs.ConnectProxyConfig{
DestinationServiceName: "api",
},
},
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{},
},
},
token: "foo",
wantErr: "none left in the configured range [2222, 2222]",
},
{
name: "auto ports disabled",
autoPortsDisabled: true,
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{},
},
},
token: "foo",
wantErr: "auto-assignment disabled in config",
},
{
name: "inherit tags and meta",
sd: &structs.ServiceDefinition{
@ -252,6 +215,58 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
token: "foo",
wantErr: "reserved for internal use",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
hcl := `
ports {
sidecar_min_port = 2222
sidecar_max_port = 2222
}
`
a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
defer a.Shutdown()
ns := tt.sd.NodeService()
err := ns.Validate()
require.NoError(t, err, "Invalid test case - NodeService must validate")
gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token)
if tt.wantErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.wantErr)
return
}
require.NoError(t, err)
require.Equal(t, tt.wantNS, gotNS)
require.Equal(t, tt.wantChecks, gotChecks)
require.Equal(t, tt.wantToken, gotToken)
})
}
}
func TestAgent_SidecarPortFromServiceID(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
tests := []struct {
name string
autoPortsDisabled bool
enterpriseMeta acl.EnterpriseMeta
maxPort int
port int
preRegister *structs.ServiceDefinition
serviceID string
wantPort int
wantErr string
}{
{
name: "use auto ports",
serviceID: "web1",
wantPort: 2222,
},
{
name: "re-registering same sidecar with no port should pick same one",
// Allow multiple ports to be sure we get the right one
@ -269,42 +284,27 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
LocalServicePort: 1111,
},
},
// Register same again but with different service port
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1112,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{},
// Register same again
serviceID: "web1-sidecar-proxy",
wantPort: 2222, // Should claim the same port as before
},
{
name: "all auto ports already taken",
// register another sidecar consuming our 1 and only allocated auto port.
preRegister: &structs.ServiceDefinition{
Kind: structs.ServiceKindConnectProxy,
Name: "api-proxy-sidecar",
Port: 2222, // Consume the one available auto-port
Proxy: &structs.ConnectProxyConfig{
DestinationServiceName: "api",
},
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222, // Should claim the same port as before
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 1112,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "127.0.0.1:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
wantErr: "none left in the configured range [2222, 2222]",
},
{
name: "auto ports disabled",
autoPortsDisabled: true,
wantErr: "auto-assignment disabled in config",
},
}
for _, tt := range tests {
@ -329,7 +329,6 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
}
`
}
a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
defer a.Shutdown()
@ -338,11 +337,8 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
require.NoError(t, err)
}
ns := tt.sd.NodeService()
err := ns.Validate()
require.NoError(t, err, "Invalid test case - NodeService must validate")
gotPort, err := a.sidecarPortFromServiceID(structs.ServiceID{ID: tt.serviceID, EnterpriseMeta: tt.enterpriseMeta})
gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token)
if tt.wantErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.wantErr)
@ -350,9 +346,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
}
require.NoError(t, err)
require.Equal(t, tt.wantNS, gotNS)
require.Equal(t, tt.wantChecks, gotChecks)
require.Equal(t, tt.wantToken, gotToken)
require.Equal(t, tt.wantPort, gotPort)
})
}
}

View File

@ -954,17 +954,28 @@ func (e *ServiceResolverConfigEntry) Validate() error {
r := e.Redirect
if err := r.ValidateEnterprise(); err != nil {
return fmt.Errorf("Redirect: %s", err.Error())
}
if len(e.Failover) > 0 {
return fmt.Errorf("Redirect and Failover cannot both be set")
}
// TODO(rb): prevent subsets and default subsets from being defined?
if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" {
if r.isEmpty() {
return fmt.Errorf("Redirect is empty")
}
if r.Service == "" {
switch {
case r.Peer != "" && r.ServiceSubset != "":
return fmt.Errorf("Redirect.Peer cannot be set with Redirect.ServiceSubset")
case r.Peer != "" && r.Partition != "":
return fmt.Errorf("Redirect.Partition cannot be set with Redirect.Peer")
case r.Peer != "" && r.Datacenter != "":
return fmt.Errorf("Redirect.Peer cannot be set with Redirect.Datacenter")
case r.Service == "":
if r.ServiceSubset != "" {
return fmt.Errorf("Redirect.ServiceSubset defined without Redirect.Service")
}
@ -974,9 +985,12 @@ func (e *ServiceResolverConfigEntry) Validate() error {
if r.Partition != "" {
return fmt.Errorf("Redirect.Partition defined without Redirect.Service")
}
} else if r.Service == e.Name {
if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) {
return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service)
if r.Peer != "" {
return fmt.Errorf("Redirect.Peer defined without Redirect.Service")
}
case r.ServiceSubset != "" && (r.Service == "" || r.Service == e.Name):
if !isSubset(r.ServiceSubset) {
return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, e.Name)
}
}
}
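
The new cross-peer redirect rules above reduce to: Peer requires Service and excludes ServiceSubset, Partition, and Datacenter. A short sketch with illustrative entry names:

```go
func sketchPeerRedirects() {
	// Accepted: redirect to a service exported by a cluster peer.
	ok := &ServiceResolverConfigEntry{
		Kind: ServiceResolver,
		Name: "test",
		Redirect: &ServiceResolverRedirect{
			Service: "other",
			Peer:    "cluster-01",
		},
	}
	_ = ok.Validate() // nil

	// Rejected: Peer combined with Datacenter (the same applies to ServiceSubset
	// and Partition, or to Peer without Service).
	bad := &ServiceResolverConfigEntry{
		Kind: ServiceResolver,
		Name: "test",
		Redirect: &ServiceResolverRedirect{
			Peer:       "cluster-01",
			Datacenter: "dc2",
		},
	}
	_ = bad.Validate() // "Redirect.Peer cannot be set with Redirect.Datacenter"
}
```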
@ -988,18 +1002,59 @@ func (e *ServiceResolverConfigEntry) Validate() error {
return fmt.Errorf("Cross-datacenter failover is only supported in the default partition")
}
if subset != "*" && !isSubset(subset) {
return fmt.Errorf("Bad Failover[%q]: not a valid subset", subset)
errorPrefix := fmt.Sprintf("Bad Failover[%q]: ", subset)
if err := f.ValidateEnterprise(); err != nil {
return fmt.Errorf(errorPrefix + err.Error())
}
if f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 {
return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, Namespace, or Datacenters is required", subset)
if subset != "*" && !isSubset(subset) {
return fmt.Errorf(errorPrefix + "not a valid subset subset")
}
if f.isEmpty() {
return fmt.Errorf(errorPrefix + "one of Service, ServiceSubset, Namespace, Targets, or Datacenters is required")
}
if f.ServiceSubset != "" {
if f.Service == "" || f.Service == e.Name {
if !isSubset(f.ServiceSubset) {
return fmt.Errorf("Bad Failover[%q].ServiceSubset %q is not a valid subset of %q", subset, f.ServiceSubset, f.Service)
return fmt.Errorf("%sServiceSubset %q is not a valid subset of %q", errorPrefix, f.ServiceSubset, f.Service)
}
}
}
if len(f.Datacenters) != 0 && len(f.Targets) != 0 {
return fmt.Errorf("Bad Failover[%q]: Targets cannot be set with Datacenters", subset)
}
if f.ServiceSubset != "" && len(f.Targets) != 0 {
return fmt.Errorf("Bad Failover[%q]: Targets cannot be set with ServiceSubset", subset)
}
if f.Service != "" && len(f.Targets) != 0 {
return fmt.Errorf("Bad Failover[%q]: Targets cannot be set with Service", subset)
}
for i, target := range f.Targets {
errorPrefix := fmt.Sprintf("Bad Failover[%q].Targets[%d]: ", subset, i)
if err := target.ValidateEnterprise(); err != nil {
return fmt.Errorf(errorPrefix + err.Error())
}
switch {
case target.Peer != "" && target.ServiceSubset != "":
return fmt.Errorf(errorPrefix + "Peer cannot be set with ServiceSubset")
case target.Peer != "" && target.Partition != "":
return fmt.Errorf(errorPrefix + "Partition cannot be set with Peer")
case target.Peer != "" && target.Datacenter != "":
return fmt.Errorf(errorPrefix + "Peer cannot be set with Datacenter")
case target.Partition != "" && target.Datacenter != "":
return fmt.Errorf(errorPrefix + "Partition cannot be set with Datacenter")
case target.ServiceSubset != "" && (target.Service == "" || target.Service == e.Name):
if !isSubset(target.ServiceSubset) {
return fmt.Errorf("%sServiceSubset %q is not a valid subset of %q", errorPrefix, target.ServiceSubset, e.Name)
}
}
}
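
Failover follows the same pattern: an explicit Targets list may mix services, datacenters, and peers, but cannot be combined with Service, ServiceSubset, or Datacenters on the same entry. A sketch matching the "complicated failover targets" case added to the tests below:

```go
func sketchFailoverTargets() {
	entry := &ServiceResolverConfigEntry{
		Kind: ServiceResolver,
		Name: "test",
		Failover: map[string]ServiceResolverFailover{
			"*": {
				Targets: []ServiceResolverFailoverTarget{
					{Peer: "cluster-01", Service: "test-v2"},
					{Datacenter: "dc2"},
				},
			},
		},
	}
	_ = entry.Validate() // nil: each target names one compatible destination

	// Adding Datacenters (or Service/ServiceSubset) alongside Targets would
	// instead fail, e.g. `Bad Failover["*"]: Targets cannot be set with Datacenters`.
}
```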
@ -1107,9 +1162,24 @@ func (e *ServiceResolverConfigEntry) ListRelatedServices() []ServiceID {
if len(e.Failover) > 0 {
for _, failover := range e.Failover {
failoverID := NewServiceID(defaultIfEmpty(failover.Service, e.Name), failover.GetEnterpriseMeta(&e.EnterpriseMeta))
if failoverID != svcID {
found[failoverID] = struct{}{}
if len(failover.Targets) == 0 {
failoverID := NewServiceID(defaultIfEmpty(failover.Service, e.Name), failover.GetEnterpriseMeta(&e.EnterpriseMeta))
if failoverID != svcID {
found[failoverID] = struct{}{}
}
continue
}
for _, target := range failover.Targets {
// We can't know about related services on cluster peers.
if target.Peer != "" {
continue
}
failoverID := NewServiceID(defaultIfEmpty(target.Service, e.Name), target.GetEnterpriseMeta(failover.GetEnterpriseMeta(&e.EnterpriseMeta)))
if failoverID != svcID {
found[failoverID] = struct{}{}
}
}
}
}
@ -1171,12 +1241,32 @@ type ServiceResolverRedirect struct {
// Datacenter is the datacenter to resolve the service from instead of the
// current one (optional).
Datacenter string `json:",omitempty"`
// Peer is the name of the cluster peer to resolve the service from instead
// of the current one (optional).
Peer string `json:",omitempty"`
}
func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
return DiscoveryTargetOpts{
Service: r.Service,
ServiceSubset: r.ServiceSubset,
Namespace: r.Namespace,
Partition: r.Partition,
Datacenter: r.Datacenter,
Peer: r.Peer,
}
}
func (r *ServiceResolverRedirect) isEmpty() bool {
return r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" && r.Peer == ""
}
// There are some restrictions on what is allowed in here:
//
// - Service, ServiceSubset, Namespace, and Datacenters cannot all be
// empty at once.
// - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be
// empty at once. When Targets is defined, the other fields should not be
// populated.
//
type ServiceResolverFailover struct {
// Service is the service to resolve instead of the default as the failover
@ -1205,6 +1295,56 @@ type ServiceResolverFailover struct {
//
// This is a DESTINATION during failover.
Datacenters []string `json:",omitempty"`
// Targets specifies a fixed list of failover targets to try. We never try a
// target multiple times, so those are subtracted from this list before
// proceeding.
//
// This is a DESTINATION during failover.
Targets []ServiceResolverFailoverTarget `json:",omitempty"`
}
func (t *ServiceResolverFailover) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
return DiscoveryTargetOpts{
Service: t.Service,
ServiceSubset: t.ServiceSubset,
Namespace: t.Namespace,
}
}
func (f *ServiceResolverFailover) isEmpty() bool {
return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 && len(f.Targets) == 0
}
type ServiceResolverFailoverTarget struct {
// Service specifies the name of the service to try during failover.
Service string `json:",omitempty"`
// ServiceSubset specifies the service subset to try during failover.
ServiceSubset string `json:",omitempty" alias:"service_subset"`
// Partition specifies the partition to try during failover.
Partition string `json:",omitempty"`
// Namespace specifies the namespace to try during failover.
Namespace string `json:",omitempty"`
// Datacenter specifies the datacenter to try during failover.
Datacenter string `json:",omitempty"`
// Peer specifies the name of the cluster peer to try during failover.
Peer string `json:",omitempty"`
}
func (t *ServiceResolverFailoverTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
return DiscoveryTargetOpts{
Service: t.Service,
ServiceSubset: t.ServiceSubset,
Namespace: t.Namespace,
Partition: t.Partition,
Datacenter: t.Datacenter,
Peer: t.Peer,
}
}
// LoadBalancer determines the load balancing policy and configuration for services

View File

@ -4,6 +4,8 @@
package structs
import (
"fmt"
"github.com/hashicorp/consul/acl"
)
@ -25,12 +27,56 @@ func (redir *ServiceResolverRedirect) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *
return DefaultEnterpriseMetaInDefaultPartition()
}
// ValidateEnterprise validates that enterprise fields are only set
// with enterprise binaries.
func (redir *ServiceResolverRedirect) ValidateEnterprise() error {
if redir.Partition != "" {
return fmt.Errorf("Setting Partition requires Consul Enterprise")
}
if redir.Namespace != "" {
return fmt.Errorf("Setting Namespace requires Consul Enterprise")
}
return nil
}
// GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from
// fields in the ServiceResolverFailover
func (failover *ServiceResolverFailover) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta {
return DefaultEnterpriseMetaInDefaultPartition()
}
// ValidateEnterprise validates that enterprise fields are only set
// with enterprise binaries.
func (failover *ServiceResolverFailover) ValidateEnterprise() error {
if failover.Namespace != "" {
return fmt.Errorf("Setting Namespace requires Consul Enterprise")
}
return nil
}
// GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from
// fields in the ServiceResolverFailoverTarget
func (target *ServiceResolverFailoverTarget) GetEnterpriseMeta(_ *acl.EnterpriseMeta) *acl.EnterpriseMeta {
return DefaultEnterpriseMetaInDefaultPartition()
}
// ValidateEnterprise validates that enterprise fields are only set
// with enterprise binaries.
func (target *ServiceResolverFailoverTarget) ValidateEnterprise() error {
if target.Partition != "" {
return fmt.Errorf("Setting Partition requires Consul Enterprise")
}
if target.Namespace != "" {
return fmt.Errorf("Setting Namespace requires Consul Enterprise")
}
return nil
}
// GetEnterpriseMeta is used to synthesize the EnterpriseMeta struct from
// fields in the DiscoveryChainRequest
func (req *DiscoveryChainRequest) GetEnterpriseMeta() *acl.EnterpriseMeta {

View File

@ -0,0 +1,153 @@
//go:build !consulent
// +build !consulent
package structs
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestServiceResolverConfigEntry_OSS(t *testing.T) {
type testcase struct {
name string
entry *ServiceResolverConfigEntry
normalizeErr string
validateErr string
// check is called between normalize and validate
check func(t *testing.T, entry *ServiceResolverConfigEntry)
}
cases := []testcase{
{
name: "failover with a namespace on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Service: "backup",
Namespace: "ns1",
},
},
},
validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
},
{
name: "failover Targets cannot set Namespace on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{{Namespace: "ns1"}},
},
},
},
validateErr: `Bad Failover["*"].Targets[0]: Setting Namespace requires Consul Enterprise`,
},
{
name: "failover Targets cannot set Partition on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{{Partition: "ap1"}},
},
},
},
validateErr: `Bad Failover["*"].Targets[0]: Setting Partition requires Consul Enterprise`,
},
{
name: "setting failover Namespace on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {Namespace: "ns1"},
},
},
validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
},
{
name: "setting redirect Namespace on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Namespace: "ns1",
},
},
validateErr: `Redirect: Setting Namespace requires Consul Enterprise`,
},
{
name: "setting redirect Partition on OSS",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Partition: "ap1",
},
},
validateErr: `Redirect: Setting Partition requires Consul Enterprise`,
},
}
// Bulk add a bunch of similar validation cases.
for _, invalidSubset := range invalidSubsetNames {
tc := testcase{
name: "invalid subset name: " + invalidSubset,
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Subsets: map[string]ServiceResolverSubset{
invalidSubset: {OnlyPassing: true},
},
},
validateErr: fmt.Sprintf("Subset %q is invalid", invalidSubset),
}
cases = append(cases, tc)
}
for _, goodSubset := range validSubsetNames {
tc := testcase{
name: "valid subset name: " + goodSubset,
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Subsets: map[string]ServiceResolverSubset{
goodSubset: {OnlyPassing: true},
},
},
}
cases = append(cases, tc)
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := tc.entry.Normalize()
if tc.normalizeErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.normalizeErr)
return
}
require.NoError(t, err)
if tc.check != nil {
tc.check(t, tc.entry)
}
err = tc.entry.Validate()
if tc.validateErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.validateErr)
return
}
require.NoError(t, err)
})
}
}

View File

@ -165,6 +165,34 @@ func TestConfigEntries_ListRelatedServices_AndACLs(t *testing.T) {
},
},
},
{
name: "resolver: failover with targets",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{
{Service: "other1"},
{Datacenter: "dc2"},
{Peer: "cluster-01"},
},
},
},
},
expectServices: []ServiceID{NewServiceID("other1", nil)},
expectACLs: []testACL{
defaultDenyCase,
readTestCase,
writeTestCaseDenied,
{
name: "can write test (with other1:read)",
authorizer: newServiceACL(t, []string{"other1"}, []string{"test"}),
canRead: true,
canWrite: true,
},
},
},
{
name: "splitter: self",
entry: &ServiceSplitterConfigEntry{
@ -595,6 +623,15 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
validateErr: "Redirect is empty",
},
{
name: "empty redirect",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{},
},
validateErr: "Redirect is empty",
},
{
name: "redirect subset with no service",
entry: &ServiceResolverConfigEntry{
@ -606,17 +643,6 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
validateErr: "Redirect.ServiceSubset defined without Redirect.Service",
},
{
name: "redirect namespace with no service",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Namespace: "alternate",
},
},
validateErr: "Redirect.Namespace defined without Redirect.Service",
},
{
name: "self redirect with invalid subset",
entry: &ServiceResolverConfigEntry{
@ -629,6 +655,41 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
validateErr: `Redirect.ServiceSubset "gone" is not a valid subset of "test"`,
},
{
name: "redirect with peer and subset",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Peer: "cluster-01",
ServiceSubset: "gone",
},
},
validateErr: `Redirect.Peer cannot be set with Redirect.ServiceSubset`,
},
{
name: "redirect with peer and datacenter",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Peer: "cluster-01",
Datacenter: "dc2",
},
},
validateErr: `Redirect.Peer cannot be set with Redirect.Datacenter`,
},
{
name: "redirect with peer and datacenter",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Peer: "cluster-01",
},
},
validateErr: `Redirect.Peer defined without Redirect.Service`,
},
{
name: "self redirect with valid subset",
entry: &ServiceResolverConfigEntry{
@ -643,6 +704,17 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
},
},
{
name: "redirect to peer",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Redirect: &ServiceResolverRedirect{
Service: "other",
Peer: "cluster-01",
},
},
},
{
name: "simple wildcard failover",
entry: &ServiceResolverConfigEntry{
@ -695,7 +767,7 @@ func TestServiceResolverConfigEntry(t *testing.T) {
"v1": {},
},
},
validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, Namespace, or Datacenters is required`,
validateErr: `Bad Failover["v1"]: one of Service, ServiceSubset, Namespace, Targets, or Datacenters is required`,
},
{
name: "failover to self using invalid subset",
@ -712,7 +784,7 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
},
},
validateErr: `Bad Failover["v1"].ServiceSubset "gone" is not a valid subset of "test"`,
validateErr: `Bad Failover["v1"]: ServiceSubset "gone" is not a valid subset of "test"`,
},
{
name: "failover to self using valid subset",
@ -745,6 +817,109 @@ func TestServiceResolverConfigEntry(t *testing.T) {
},
validateErr: `Bad Failover["*"].Datacenters: found empty datacenter`,
},
{
name: "failover target with an invalid subset",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{{ServiceSubset: "subset"}},
},
},
},
validateErr: `Bad Failover["*"].Targets[0]: ServiceSubset "subset" is not a valid subset of "test"`,
},
{
name: "failover targets can't have Peer and ServiceSubset",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01", ServiceSubset: "subset"}},
},
},
},
validateErr: `Bad Failover["*"].Targets[0]: Peer cannot be set with ServiceSubset`,
},
{
name: "failover targets can't have Peer and Datacenter",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01", Datacenter: "dc1"}},
},
},
},
validateErr: `Bad Failover["*"].Targets[0]: Peer cannot be set with Datacenter`,
},
{
name: "failover Targets cannot be set with Datacenters",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Datacenters: []string{"a"},
Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01"}},
},
},
},
validateErr: `Bad Failover["*"]: Targets cannot be set with Datacenters`,
},
{
name: "failover Targets cannot be set with ServiceSubset",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
ServiceSubset: "v2",
Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01"}},
},
},
Subsets: map[string]ServiceResolverSubset{
"v2": {Filter: "Service.Meta.version == v2"},
},
},
validateErr: `Bad Failover["*"]: Targets cannot be set with ServiceSubset`,
},
{
name: "failover Targets cannot be set with Service",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Service: "another-service",
Targets: []ServiceResolverFailoverTarget{{Peer: "cluster-01"}},
},
},
Subsets: map[string]ServiceResolverSubset{
"v2": {Filter: "Service.Meta.version == v2"},
},
},
validateErr: `Bad Failover["*"]: Targets cannot be set with Service`,
},
{
name: "complicated failover targets",
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test",
Failover: map[string]ServiceResolverFailover{
"*": {
Targets: []ServiceResolverFailoverTarget{
{Peer: "cluster-01", Service: "test-v2"},
{Service: "test-v2", ServiceSubset: "test"},
{Datacenter: "dc2"},
},
},
},
},
},
{
name: "bad connect timeout",
entry: &ServiceResolverConfigEntry{

View File

@ -216,6 +216,85 @@ func testConfigEntries_ListRelatedServices_AndACLs(t *testing.T, cases []configE
}
}
func TestDecodeConfigEntry_ServiceDefaults(t *testing.T) {
for _, tc := range []struct {
name string
camel string
snake string
expect ConfigEntry
expectErr string
}{
{
name: "service-defaults-with-MaxInboundConnections",
snake: `
kind = "service-defaults"
name = "external"
protocol = "tcp"
destination {
addresses = [
"api.google.com",
"web.google.com"
]
port = 8080
}
max_inbound_connections = 14
`,
camel: `
Kind = "service-defaults"
Name = "external"
Protocol = "tcp"
Destination {
Addresses = [
"api.google.com",
"web.google.com"
]
Port = 8080
}
MaxInboundConnections = 14
`,
expect: &ServiceConfigEntry{
Kind: "service-defaults",
Name: "external",
Protocol: "tcp",
Destination: &DestinationConfig{
Addresses: []string{
"api.google.com",
"web.google.com",
},
Port: 8080,
},
MaxInboundConnections: 14,
},
},
} {
tc := tc
testbody := func(t *testing.T, body string) {
var raw map[string]interface{}
err := hcl.Decode(&raw, body)
require.NoError(t, err)
got, err := DecodeConfigEntry(raw)
if tc.expectErr != "" {
require.Nil(t, got)
require.Error(t, err)
requireContainsLower(t, err.Error(), tc.expectErr)
} else {
require.NoError(t, err)
require.Equal(t, tc.expect, got)
}
}
t.Run(tc.name+" (snake case)", func(t *testing.T) {
testbody(t, tc.snake)
})
t.Run(tc.name+" (camel case)", func(t *testing.T) {
testbody(t, tc.camel)
})
}
}
// TestDecodeConfigEntry is the 'structs' mirror image of
// command/config/write/config_write_test.go:TestParseConfigEntry
func TestDecodeConfigEntry(t *testing.T) {

View File

@ -53,28 +53,15 @@ type CompiledDiscoveryChain struct {
Targets map[string]*DiscoveryTarget `json:",omitempty"`
}
func (c *CompiledDiscoveryChain) WillFailoverThroughMeshGateway(node *DiscoveryGraphNode) bool {
if node.Type != DiscoveryGraphNodeTypeResolver {
return false
}
failover := node.Resolver.Failover
if failover != nil && len(failover.Targets) > 0 {
for _, failTargetID := range failover.Targets {
failTarget := c.Targets[failTargetID]
switch failTarget.MeshGateway.Mode {
case MeshGatewayModeLocal, MeshGatewayModeRemote:
return true
}
}
}
return false
}
// ID returns an ID that encodes the service, namespace, partition, and datacenter.
// This ID allows us to compare a discovery chain target to the chain upstream itself.
func (c *CompiledDiscoveryChain) ID() string {
return chainID("", c.ServiceName, c.Namespace, c.Partition, c.Datacenter)
return chainID(DiscoveryTargetOpts{
Service: c.ServiceName,
Namespace: c.Namespace,
Partition: c.Partition,
Datacenter: c.Datacenter,
})
}
func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName {
@ -203,6 +190,7 @@ type DiscoveryTarget struct {
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
Datacenter string `json:",omitempty"`
Peer string `json:",omitempty"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
Subset ServiceResolverSubset `json:",omitempty"`
@ -258,28 +246,52 @@ func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error {
return nil
}
func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget {
type DiscoveryTargetOpts struct {
Service string
ServiceSubset string
Namespace string
Partition string
Datacenter string
Peer string
}
func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget {
t := &DiscoveryTarget{
Service: service,
ServiceSubset: serviceSubset,
Namespace: namespace,
Partition: partition,
Datacenter: datacenter,
Service: opts.Service,
ServiceSubset: opts.ServiceSubset,
Namespace: opts.Namespace,
Partition: opts.Partition,
Datacenter: opts.Datacenter,
Peer: opts.Peer,
}
t.setID()
return t
}
func chainID(subset, service, namespace, partition, dc string) string {
// NOTE: this format is similar to the SNI syntax for simplicity
if subset == "" {
return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, dc)
func (t *DiscoveryTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
return DiscoveryTargetOpts{
Service: t.Service,
ServiceSubset: t.ServiceSubset,
Namespace: t.Namespace,
Partition: t.Partition,
Datacenter: t.Datacenter,
Peer: t.Peer,
}
return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, dc)
}
func chainID(opts DiscoveryTargetOpts) string {
// NOTE: this format is similar to the SNI syntax for simplicity
if opts.Peer != "" {
return fmt.Sprintf("%s.%s.default.external.%s", opts.Service, opts.Namespace, opts.Peer)
}
if opts.ServiceSubset == "" {
return fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
}
return fmt.Sprintf("%s.%s.%s.%s.%s", opts.ServiceSubset, opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
}
func (t *DiscoveryTarget) setID() {
t.ID = chainID(t.ServiceSubset, t.Service, t.Namespace, t.Partition, t.Datacenter)
t.ID = chainID(t.ToDiscoveryTargetOpts())
}
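// Illustrative sketch, not part of this change: how the opts-based constructor and the
// chainID format above combine. The function name and concrete values are made up, and
// this assumes it sits in the same package as the declarations shown here.
func exampleDiscoveryTargetIDs() (string, string) {
	peered := NewDiscoveryTarget(DiscoveryTargetOpts{
		Service:   "api",
		Namespace: "default",
		Peer:      "cluster-01",
	})
	// Peered targets render as <service>.<namespace>.default.external.<peer>:
	// peered.ID == "api.default.default.external.cluster-01"

	subset := NewDiscoveryTarget(DiscoveryTargetOpts{
		Service:       "api",
		ServiceSubset: "v2",
		Namespace:     "default",
		Partition:     "default",
		Datacenter:    "dc1",
	})
	// Non-peered targets keep the <subset>.<service>.<namespace>.<partition>.<datacenter> form:
	// subset.ID == "v2.api.default.default.dc1"
	return peered.ID, subset.ID
}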
func (t *DiscoveryTarget) String() string {

View File

@ -353,7 +353,7 @@ func (q QueryOptions) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime tim
q.MaxQueryTime = defaultQueryTime
}
// Timeout after maximum jitter has elapsed.
q.MaxQueryTime += lib.RandomStagger(q.MaxQueryTime / JitterFraction)
q.MaxQueryTime += q.MaxQueryTime / JitterFraction
return q.MaxQueryTime + rpcHoldTimeout
}
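// Illustrative sketch, not part of this change: what the deterministic max-jitter deadline
// works out to for a typical blocking query. The function name and the concrete durations
// are made up, the JitterFraction value of 16 is an assumption, and this assumes it sits
// alongside QueryOptions where the time package is already imported.
func exampleClientDeadline() time.Duration {
	q := QueryOptions{MaxQueryTime: 10 * time.Minute}
	// 10m of query time + 10m/16 (37.5s, the most jitter the server could add) + a 5s RPC
	// hold timeout, so the client's read deadline outlives the server's blocking window.
	return q.Timeout(5*time.Second, 10*time.Minute, 5*time.Minute)
}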
@ -1413,6 +1413,27 @@ func (s *NodeService) IsGateway() bool {
func (s *NodeService) Validate() error {
var result error
if s.Kind == ServiceKindConnectProxy {
if s.Port == 0 && s.SocketPath == "" {
result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind))
}
}
commonValidation := s.ValidateForAgent()
if commonValidation != nil {
result = multierror.Append(result, commonValidation)
}
return result
}
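// Illustrative note, not part of this change: Validate (above) is the stricter check used
// when no local agent is involved (the catalog case described below), so a connect proxy
// must carry a Port or SocketPath there; ValidateForAgent skips that requirement because a
// local agent can auto-assign a sidecar port, as exercised by
// TestStructs_NodeService_ValidateConnectProxyWithAgentAutoAssign.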
// ValidateForAgent does a subset of validation, with the assumption that a local agent
// can assist with filling in missing values.
//
// In the catalog case, by contrast, a local agent cannot be assumed to facilitate
// auto-assignment of port or socket path, so Validate performs additional checks.
func (s *NodeService) ValidateForAgent() error {
var result error
// TODO(partitions): remember to double check that this doesn't cross partition boundaries
// ConnectProxy validation
@ -1428,10 +1449,6 @@ func (s *NodeService) Validate() error {
"services"))
}
if s.Port == 0 && s.SocketPath == "" {
result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind))
}
if s.Connect.Native {
result = multierror.Append(result, fmt.Errorf(
"A Proxy cannot also be Connect Native, only typical services"))
@ -2194,8 +2211,8 @@ type PeeredServiceName struct {
}
type ServiceName struct {
Name string
acl.EnterpriseMeta
Name string
acl.EnterpriseMeta `mapstructure:",squash"`
}
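// Note, not part of this change: mapstructure's ",squash" tag makes the embedded
// acl.EnterpriseMeta fields (such as Namespace and Partition) decode at the top level of a
// ServiceName map rather than under a nested "EnterpriseMeta" key; the caller motivating
// the tag is outside this hunk.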
func NewServiceName(name string, entMeta *acl.EnterpriseMeta) ServiceName {

View File

@ -1157,6 +1157,16 @@ func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) {
}
}
func TestStructs_NodeService_ValidateConnectProxyWithAgentAutoAssign(t *testing.T) {
t.Run("connect-proxy: no port set", func(t *testing.T) {
ns := TestNodeServiceProxy(t)
ns.Port = 0
err := ns.ValidateForAgent()
assert.NoError(t, err)
})
}
func TestStructs_NodeService_ValidateConnectProxy_In_Partition(t *testing.T) {
cases := []struct {
Name string

View File

@ -66,6 +66,10 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
if ctx.Err() != nil {
return
}
if m.isTerminalError(err) {
return
}
m.mat.handleError(req, err)
if err := m.mat.retryWaiter.Wait(ctx); err != nil {
@ -74,6 +78,14 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
}
}
// isTerminalError determines whether the given error cannot be recovered from
// and should cause the materializer to halt and be evicted from the view store.
//
// This roughly matches the logic in agent/proxycfg-glue.newUpdateEvent.
func (m *LocalMaterializer) isTerminalError(err error) bool {
return acl.IsErrNotFound(err)
}
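// Illustrative note, not part of this change: a subscription that starts failing because
// its ACL token was deleted surfaces an acl "not found" error; Run treats that as terminal
// and returns instead of retrying, which (per the store changes below) lets the view be
// evicted rather than retried forever.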
// subscribeOnce opens a new subscription to a local backend and runs
// for its lifetime or until the view is closed.
func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error {

View File

@ -47,6 +47,9 @@ type entry struct {
// requests is the count of active requests using this entry. This entry will
// remain in the store as long as this count remains > 0.
requests int
// evicting is used to mark an entry that will be evicted when the current in-
// flight requests finish.
evicting bool
}
// NewStore creates and returns a Store that is ready for use. The caller must
@ -89,6 +92,7 @@ func (s *Store) Run(ctx context.Context) {
// Only stop the materializer if there are no active requests.
if e.requests == 0 {
s.logger.Trace("evicting item from store", "key", he.Key())
e.stop()
delete(s.byKey, he.Key())
}
@ -187,13 +191,13 @@ func (s *Store) NotifyCallback(
"error", err,
"request-type", req.Type(),
"index", index)
continue
}
index = result.Index
cb(ctx, cache.UpdateEvent{
CorrelationID: correlationID,
Result: result.Value,
Err: err,
Meta: cache.ResultMeta{Index: result.Index, Hit: result.Cached},
})
}
@ -211,6 +215,9 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
defer s.lock.Unlock()
e, ok := s.byKey[key]
if ok {
if e.evicting {
return "", nil, errors.New("item is marked for eviction")
}
e.requests++
s.byKey[key] = e
return key, e.materializer, nil
@ -222,7 +229,18 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
}
ctx, cancel := context.WithCancel(context.Background())
go mat.Run(ctx)
go func() {
mat.Run(ctx)
// Materializers run until they either reach their TTL and are evicted (which
// cancels the given context) or encounter an irrecoverable error.
//
// If the context hasn't been canceled, we know it's the error case so we
// trigger an immediate eviction.
if ctx.Err() == nil {
s.evictNow(key)
}
}()
e = entry{
materializer: mat,
@ -233,6 +251,28 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
return key, e.materializer, nil
}
// evictNow causes the item with the given key to be evicted immediately.
//
// If there are requests in-flight, the item is marked for eviction such that
// once the requests have been served releaseEntry will move it to the top of
// the expiry heap. If there are no requests in-flight, evictNow will move the
// item to the top of the expiry heap itself.
//
// In either case, the entry's evicting flag prevents it from being served by
// readEntry (and thereby gaining new in-flight requests).
func (s *Store) evictNow(key string) {
s.lock.Lock()
defer s.lock.Unlock()
e := s.byKey[key]
e.evicting = true
s.byKey[key] = e
if e.requests == 0 {
s.expireNowLocked(key)
}
}
// releaseEntry decrements the request count and starts an expiry timer if the
// count has reached 0. Must be called once for every call to readEntry.
func (s *Store) releaseEntry(key string) {
@ -246,6 +286,11 @@ func (s *Store) releaseEntry(key string) {
return
}
if e.evicting {
s.expireNowLocked(key)
return
}
if e.expiry.Index() == ttlcache.NotIndexed {
e.expiry = s.expiryHeap.Add(key, s.idleTTL)
s.byKey[key] = e
@ -255,6 +300,17 @@ func (s *Store) releaseEntry(key string) {
s.expiryHeap.Update(e.expiry.Index(), s.idleTTL)
}
// expireNowLocked moves the item with the given key to the top of the expiry
// heap, causing it to be picked up by the expiry loop and evicted immediately.
func (s *Store) expireNowLocked(key string) {
e := s.byKey[key]
if idx := e.expiry.Index(); idx != ttlcache.NotIndexed {
s.expiryHeap.Remove(idx)
}
e.expiry = s.expiryHeap.Add(key, time.Duration(0))
s.byKey[key] = e
}
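// Illustrative walkthrough, not part of this change: how a materializer that hits a
// terminal error is evicted while requests are still in flight. Names refer to the code
// above; the ordering is a simplification.
//
//  1. Materializer.Run returns while ctx.Err() is still nil, so the goroutine started in
//     readEntry calls evictNow(key).
//  2. evictNow marks the entry as evicting; because requests > 0 it does not expire it yet.
//  3. New callers of readEntry are refused with "item is marked for eviction".
//  4. When the last in-flight request calls releaseEntry, the evicting flag routes it to
//     expireNowLocked, which re-adds the key to the expiry heap with a zero TTL.
//  5. The Store's Run loop pops the expired key, sees requests == 0, stops the
//     materializer, and deletes the entry.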
// makeEntryKey matches agent/cache.makeEntryKey, but may change in the future.
func makeEntryKey(typ string, r cache.RequestInfo) string {
return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key)

Some files were not shown because too many files have changed in this diff.