Merge branch 'master' into 6074-allow-config-MaxHeaderBytes

This commit is contained in:
Michael Montgomery 2020-11-20 07:43:53 -06:00
commit ed719c978b
3002 changed files with 53545 additions and 21595 deletions

4
.changelog/8771.txt Normal file

@ -0,0 +1,4 @@
```release-note:bug
api: Fixed a bug where the Check.GRPCUseTLS field could not be set using snake case.
```
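
For illustration, a minimal Go sketch of registering a gRPC check against the local agent using the snake-case key this fix enables; the `/v1/agent/check/register` endpoint is standard, but the exact key name `grpc_use_tls` and the agent address are assumptions, not confirmed by this diff.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Register a gRPC check using the snake-case key for GRPCUseTLS.
	// The key name grpc_use_tls and the agent address are assumptions.
	body := []byte(`{
	  "Name": "my-grpc-check",
	  "GRPC": "127.0.0.1:8502",
	  "grpc_use_tls": true,
	  "Interval": "10s"
	}`)

	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:8500/v1/agent/check/register", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```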

View File

@ -1,4 +1,3 @@
```release-note:improvement
api: The `v1/connect/ca/roots` endpoint now accepts a `pem=true` query parameter and will return a PEM encoded certificate chain of
all the certificates that would normally be in the JSON version of the response.
api: The `v1/connect/ca/roots` endpoint now accepts a `pem=true` query parameter and will return a PEM encoded certificate chain of all the certificates that would normally be in the JSON version of the response.
```
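
A minimal Go sketch of the `pem=true` behaviour described in this entry, assuming a local agent on the default address; the endpoint and query parameter come from the entry itself.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Request the Connect CA roots as a PEM-encoded chain instead of JSON.
	resp, err := http.Get("http://127.0.0.1:8500/v1/connect/ca/roots?pem=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	pem, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The body contains the certificates that would otherwise appear in the
	// JSON response, concatenated as PEM blocks.
	fmt.Printf("%s", pem)
}
```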

3
.changelog/8924.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
connect: fix connect sidecars registered via the API not being automatically deregistered with their parent service after an agent restart by persisting the LocallyRegisteredAsSidecar property.
```

3
.changelog/9002.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
ui: add dashboard_url_template config option for external dashboard links
```

3
.changelog/9006.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
cli: snapshot inspect command supports JSON output
```
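
A rough Go sketch of consuming that JSON output programmatically; the `-format=json` flag name and the snapshot file name are assumptions — only the JSON-output capability comes from this entry.

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Run snapshot inspect and capture its JSON output. The flag name is an
	// assumption based on this changelog entry.
	out, err := exec.Command("consul", "snapshot", "inspect",
		"-format=json", "backup.snap").Output()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```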

3
.changelog/9007.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
server: break up Intention.Apply monolithic method
```

3
.changelog/9008.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
ui: only show topology tab for services that exist
```

3
.changelog/9024.txt Normal file

@ -0,0 +1,3 @@
```release-note:security
Fix Consul Enterprise Namespace Config Entry Replication DoS. Previously an operator with service:write ACL permissions in a Consul Enterprise cluster could write a malicious config entry that caused infinite raft writes due to issues with the namespace replication logic. [[CVE-2020-25201](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-25201)]
```

3
.changelog/9059.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
agent: add path_allowlist config option to restrict metrics proxy queries
```

4
.changelog/9081.txt Normal file

@ -0,0 +1,4 @@
```release-note:bug
ui: hide metrics for ingress gateways until full support can be implemented
```

3
.changelog/9088.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
telemetry: add initialization and definition for non-expiring key metrics in Prometheus
```

3
.changelog/9098.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
cli: snapshot inspect command provides KV usage breakdown
```

3
.changelog/9099.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
agent: protect the metrics proxy behind ACLs
```

3
.changelog/9101.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
agent: return the default ACL policy to callers as a header
```
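
A hedged Go sketch of reading the new header from an agent response; the header name `X-Consul-Default-ACL-Policy` and its values are assumptions not confirmed by this diff.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Any agent HTTP response should carry the default ACL policy header.
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/self")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Header name assumed; expected values would be "allow" or "deny".
	policy := resp.Header.Get("X-Consul-Default-ACL-Policy")
	fmt.Println("default ACL policy:", policy)
}
```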

12
.changelog/9103.txt Normal file

@ -0,0 +1,12 @@
```release-note:bug
autopilot: **(Enterprise Only)** Previously servers in other zones would not be promoted when all servers in a second zone had failed. Now the actual behavior matches the docs and autopilot will promote a healthy non-voter from any zone to replace failure of an entire zone.
```
```release-note:feature
autopilot: A new `/v1/operator/autopilot/state` HTTP API was created to give greater visibility into what autopilot is doing and how it has classified all the servers it is tracking.
```
```release-note:improvement
autopilot: **(Enterprise Only)** Autopilot now supports using both Redundancy Zones and Automated Upgrades together.
```
```release-note:breaking-change
raft: Raft protocol v2 is no longer supported. If currently using protocol v2, an intermediate upgrade to a version supporting both the v2 and v3 protocols (1.0.0 - 1.8.x) will be necessary. Note that the Raft protocol configured with the `raft_protocol` setting and the Consul RPC protocol configured with the `protocol` setting (and output by the `consul version` command) are distinct; supported Consul RPC protocol versions are not altered.
```
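
To make the new state endpoint concrete, a minimal Go sketch that fetches it and prints the raw JSON (the response schema is not shown in this diff); the CLI equivalent is the `consul operator autopilot state` command added in GH-9142.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch the autopilot state, which reports how autopilot has classified
	// each server it is tracking.
	resp, err := http.Get("http://127.0.0.1:8500/v1/operator/autopilot/state")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	state, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", state)
}
```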

3
.changelog/9113.txt Normal file

@ -0,0 +1,3 @@
```release-note:breaking-change
connect: Switch the default gateway port from 443 to 8443 to avoid the assumption that Envoy runs as root.
```

3
.changelog/9119.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
chore: update to Go 1.14.11 with mitigation for [golang/go#42138](https://github.com/golang/go/issues/42138)
```

7
.changelog/9141.txt Normal file

@ -0,0 +1,7 @@
```release-note:improvement
ui: Moves the Proxy health checks to be displayed with the Service health check under the Health Checks tab
```
```release-note:improvement
ui: Add the Upstreams and Exposed Paths tabs for services in mesh
```

3
.changelog/9142.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
autopilot: Added a new `consul operator autopilot state` command to retrieve and view the Autopilot state from Consul.
```

3
.changelog/9151.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
server: remove config entry CAS in legacy intention API bridge code
```

3
.changelog/9156.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
namespace: **(Enterprise Only)** Fixed a bug that could cause snapshot restoration to fail when it contained a namespace marked for deletion while still containing other resources in that namespace.
```

3
.changelog/9181.txt Normal file

@ -0,0 +1,3 @@
```release-note:deprecation
telemetry: the disable_compat_1.9 config will cover more metrics deprecations in future 1.9 point releases. These metrics will be emitted twice for backwards compatibility - if the flag is true, only the new metric name will be written.
```

18
.changelog/9186.txt Normal file

@ -0,0 +1,18 @@
```release-note:bug
server: skip deleted and deleting namespaces when migrating intentions to config entries
```
```release-note:breaking-change
server: **(Enterprise only)** Pre-existing intentions defined with
non-existent destination namespaces were non-functional and are erased during
the upgrade process. This should not matter as these intentions had nothing to
enforce.
```
```release-note:breaking-change
server: **(OSS only)** Pre-existing intentions defined with either a source or
destination namespace value that is not "default" are rewritten or deleted
during the upgrade process. Wildcards first attempt to downgrade to "default"
unless an intention already exists, otherwise these non-functional intentions
are deleted.
```

18
.changelog/9191.txt Normal file

@ -0,0 +1,18 @@
```release-note:deprecation
cli: **(Enterprise only)** The `-non-voting-server` flag is deprecated in favor of the new `-read-replica` flag. The `-non-voting-server` flag is still present alongside the new flag but will be removed in a future release.
```
```release-note:improvement
cli: **(Enterprise only)** A new `-read-replica` flag can now be used to enable running a server as a read only replica. Previously this was enabled with the now deprecated `-non-voting-server` flag.
```
```release-note:deprecation
config: **(Enterprise only)** The `non_voting_server` configuration setting is deprecated in favor of the new `read_replica` setting. The `non_voting_server` configuration setting is still present but will be removed in a future release.
```
```release-note:improvement
config: **(Enterprise only)** A new `read_replica` configuration setting can now be used to enable running a server as a read only replica. Previously this was enabled with the now deprecated `non_voting_server` setting.
```
```release-note:deprecation
server: **(Enterprise only)** Addition of the `nonvoter` tag to the service registration made for read replicas is deprecated in favor of the new tag name of `read_replica`. Both are present in the registration but the `nonvoter` tag will be completely removed in a future release.
```
```release-note:deprecation
gossip: **(Enterprise only)** Read replicas now advertise themselves by setting the `read_replica` tag. The old `nonvoter` tag is still present but is deprecated and will be removed in a future release.
```

3
.changelog/9198.txt Normal file

@ -0,0 +1,3 @@
```release-note:improvement
agent: All metrics should be present and available to Prometheus scrapers when Consul starts. If any non-deprecated metrics are missing, please submit an issue with the metric name.
```

3
.changelog/9204.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
autopilot: Prevent panic when requesting the autopilot health immediately after a leader is elected.
```

3
.changelog/9207.txt Normal file

@ -0,0 +1,3 @@
```release-note:breaking-change
connect: Update Envoy metrics names and labels for proxy listeners so that attributes like datacenter and namespace can be extracted.
```

3
.changelog/9229.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
command: when generating envoy bootstrap configs use the datacenter returned from the agent services endpoint
```

3
.changelog/9240.txt Normal file

@ -0,0 +1,3 @@
```release-note:security
Increase the permissions to read from the `/connect/ca/configuration` endpoint to `operator:write`. Previously Connect CA configuration, including the private key, set via this endpoint could be read back by an operator with `operator:read` privileges. [CVE-2020-28053](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-28053)
```

3
.changelog/_666.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
namespace: **(Enterprise Only)** Fixed an issue where namespaced services and checks were not being deleted when the containing namespace was deleted.
```

3
.changelog/_683.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
license: **(Enterprise only)** Fixed an issue where warnings about Namespaces being unlicensed would be emitted erroneously.
```

3
.changelog/_8984.txt Normal file

@ -0,0 +1,3 @@
```release-note:breaking-change
sentinel: **(Consul Enterprise only)** update to v0.16.0, which replaces `whitelist` and `blacklist` with `allowlist` and `denylist`
```

View File

@ -3,15 +3,14 @@ version: 2
references:
images:
go: &GOLANG_IMAGE circleci/golang:1.14.9
middleman: &MIDDLEMAN_IMAGE hashicorp/middleman-hashicorp:0.3.40
ember: &EMBER_IMAGE circleci/node:12-browsers
go: &GOLANG_IMAGE docker.mirror.hashicorp.services/circleci/golang:1.15.5
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:12-browsers
paths:
test-results: &TEST_RESULTS_DIR /tmp/test-results
cache:
yarn: &YARN_CACHE_KEY consul-ui-v2-{{ checksum "ui-v2/yarn.lock" }}
yarn: &YARN_CACHE_KEY consul-ui-v3-{{ checksum "ui/yarn.lock" }}
rubygem: &RUBYGEM_CACHE_KEY static-site-gems-v1-{{ checksum "Gemfile.lock" }}
environment: &ENVIRONMENT
@ -27,7 +26,7 @@ steps:
install-gotestsum: &install-gotestsum
name: install gotestsum
environment:
GOTESTSUM_RELEASE: 0.5.1
GOTESTSUM_RELEASE: 0.6.0
command: |
url=https://github.com/gotestyourself/gotestsum/releases/download
curl -sSL "${url}/v${GOTESTSUM_RELEASE}/gotestsum_${GOTESTSUM_RELEASE}_linux_amd64.tar.gz" | \
@ -237,16 +236,15 @@ jobs:
name: go test -race
command: |
mkdir -p $TEST_RESULTS_DIR /tmp/jsonfile
pkgs="$(go list ./... | \
grep -E -v '^github.com/hashicorp/consul/agent(/consul|/local|/xds|/routine-leak-checker)?$' | \
grep -E -v '^github.com/hashicorp/consul/command/')"
gotestsum \
--format=short-verbose \
--jsonfile /tmp/jsonfile/go-test-race.log \
--junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- \
-tags="$GOTAGS" -p 2 \
-race -gcflags=all=-d=checkptr=0 \
./agent/{ae,cache,cache-types,checks,config,pool,proxycfg,router}/... \
./agent/consul/{authmethod,autopilot,fsm,state,stream}/... \
./agent/{grpc,rpc,rpcclient,submatview}/... \
./snapshot
$pkgs
- store_test_results:
path: *TEST_RESULTS_DIR
@ -345,7 +343,7 @@ jobs:
<<: *build-distros
environment:
<<: *build-env
XC_OS: "darwin freebsd linux windows"
XC_OS: "freebsd linux windows"
XC_ARCH: "386"
# build all amd64 architecture supported OS binaries
@ -440,7 +438,7 @@ jobs:
# upload dev docker image
dev-upload-docker:
docker:
- image: circleci/golang:latest # use a circleci image so the attach_workspace step works (has ca-certs installed)
- image: *GOLANG_IMAGE # use a circleci image so the attach_workspace step works (has ca-certs installed)
environment:
<<: *ENVIRONMENT
steps:
@ -456,7 +454,7 @@ jobs:
# Run integration tests on nomad/v0.8.7
nomad-integration-0_8:
docker:
- image: circleci/golang:1.10
- image: docker.mirror.hashicorp.services/circleci/golang:1.10
environment:
<<: *ENVIRONMENT
NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /go/src/github.com/hashicorp/nomad
@ -505,7 +503,7 @@ jobs:
build-website-docker-image:
docker:
- image: circleci/buildpack-deps
- image: docker.mirror.hashicorp.services/circleci/buildpack-deps
shell: /usr/bin/env bash -euo pipefail -c
steps:
- checkout
@ -531,7 +529,7 @@ jobs:
algolia-index:
docker:
- image: node:12
- image: docker.mirror.hashicorp.services/node:12
steps:
- checkout
- run:
@ -559,12 +557,13 @@ jobs:
- run:
name: install yarn packages
command: cd ui-v2 && yarn install
command: cd ui/packages/consul-ui && yarn install --focus
- save_cache:
key: *YARN_CACHE_KEY
paths:
- ui-v2/node_modules
- ui/node_modules
- ui/packages/consul-ui/node_modules
- run: *notify-slack-failure
# build ember so frontend tests run faster
@ -578,13 +577,13 @@ jobs:
- checkout
- restore_cache:
key: *YARN_CACHE_KEY
- run: cd ui-v2 && make build-ci
- run: cd ui/packages/consul-ui && make build-ci
# saves the build to a workspace to be passed to a downstream job
- persist_to_workspace:
root: ui-v2
root: ui
paths:
- dist
- packages/consul-ui/dist
- run: *notify-slack-failure
# build ember so frontend tests run faster
@ -604,13 +603,13 @@ jobs:
- checkout
- restore_cache:
key: *YARN_CACHE_KEY
- run: cd ui-v2 && make
- run: cd ui && make
# saves the build to a workspace to be passed to a downstream job
- persist_to_workspace:
root: ui-v2
root: ui
paths:
- dist
- packages/consul-ui/dist
- run: *notify-slack-failure
# build static-assets file
@ -621,7 +620,7 @@ jobs:
- checkout
- attach_workspace:
at: ./pkg
- run: mv pkg/dist pkg/web_ui # 'make static-assets' looks for the 'pkg/web_ui' path
- run: mv pkg/packages/consul-ui/dist pkg/web_ui # 'make static-assets' looks for the 'pkg/web_ui' path
- run: make tools
- run: make static-assets
- persist_to_workspace:
@ -644,10 +643,10 @@ jobs:
- run:
name: commit agent/uiserver/bindata_assetfs.go if there are UI changes
command: |
# check if there are any changes in ui-v2/
# check if there are any changes in ui/
# if there are, we commit the ui static asset file
# HEAD^! is shorthand for HEAD^..HEAD (parent of HEAD and HEAD)
if ! git diff --quiet --exit-code HEAD^! ui-v2/; then
if ! git diff --quiet --exit-code HEAD^! ui/; then
git config --local user.email "hashicorp-ci@users.noreply.github.com"
git config --local user.name "hashicorp-ci"
@ -669,9 +668,9 @@ jobs:
- restore_cache:
key: *YARN_CACHE_KEY
- attach_workspace:
at: ui-v2
at: ui
- run:
working_directory: ui-v2
working_directory: ui/packages/consul-ui
command: make test-node
- run: *notify-slack-failure
@ -689,12 +688,12 @@ jobs:
- restore_cache:
key: *YARN_CACHE_KEY
- attach_workspace:
at: ui-v2
at: ui
- run:
working_directory: ui-v2
working_directory: ui/packages/consul-ui
command: node_modules/.bin/ember exam --split=$CIRCLE_NODE_TOTAL --partition=`expr $CIRCLE_NODE_INDEX + 1` --path dist --silent -r xunit
- store_test_results:
path: ui-v2/test-results
path: ui/packages/consul-ui/test-results
- run: *notify-slack-failure
# run ember frontend tests
@ -711,12 +710,12 @@ jobs:
- restore_cache:
key: *YARN_CACHE_KEY
- attach_workspace:
at: ui-v2
at: ui
- run:
working_directory: ui-v2
working_directory: ui/packages/consul-ui
command: node_modules/.bin/ember exam --split=$CIRCLE_NODE_TOTAL --partition=`expr $CIRCLE_NODE_INDEX + 1` --path dist --silent -r xunit
- store_test_results:
path: ui-v2/test-results
path: ui/packages/consul-ui/test-results
- run: *notify-slack-failure
# run ember frontend unit tests to produce coverage report
@ -728,23 +727,24 @@ jobs:
- restore_cache:
key: *YARN_CACHE_KEY
- attach_workspace:
at: ui-v2
at: ui
- run:
working_directory: ui-v2
working_directory: ui/packages/consul-ui
command: make test-coverage-ci
- run:
name: codecov ui upload
working_directory: ui-v2
working_directory: ui/packages/consul-ui
command: bash <(curl -s https://codecov.io/bash) -v -c -C $CIRCLE_SHA1 -F ui
- run: *notify-slack-failure
envoy-integration-test-1.12.6:
envoy-integration-test-1.13.6: &ENVOY_TESTS
docker:
# We only really need bash and docker-compose which is installed on all
# Circle images but pick Go since we have to pick one of them.
- image: *GOLANG_IMAGE
parallelism: 2
environment:
ENVOY_VERSION: "1.12.6"
ENVOY_VERSION: "1.13.6"
steps: &ENVOY_INTEGRATION_TEST_STEPS
- checkout
# Get go binary from workspace
@ -756,8 +756,12 @@ jobs:
- run:
name: Envoy Integration Tests
command: |
subtests=$(ls -d test/integration/connect/envoy/*/ | xargs -n 1 basename | circleci tests split)
echo "Running $(echo $subtests | wc -w) subtests"
echo "$subtests"
subtests_pipe_sepr=$(echo "$subtests" | xargs | sed 's/ /|/g')
mkdir -p /tmp/test-results/
gotestsum -- -timeout=30m -tags integration ./test/integration/connect/envoy
gotestsum -- -timeout=30m -tags integration ./test/integration/connect/envoy -run="TestEnvoy/($subtests_pipe_sepr)"
environment:
GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
GOTESTSUM_FORMAT: standard-verbose
@ -773,26 +777,20 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
envoy-integration-test-1.13.4:
docker:
- image: *GOLANG_IMAGE
envoy-integration-test-1.14.5:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.13.4"
steps: *ENVOY_INTEGRATION_TEST_STEPS
ENVOY_VERSION: "1.14.5"
envoy-integration-test-1.14.4:
docker:
- image: *GOLANG_IMAGE
envoy-integration-test-1.15.2:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.14.4"
steps: *ENVOY_INTEGRATION_TEST_STEPS
ENVOY_VERSION: "1.15.2"
envoy-integration-test-1.15.0:
docker:
- image: *GOLANG_IMAGE
envoy-integration-test-1.16.0:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.15.0"
steps: *ENVOY_INTEGRATION_TEST_STEPS
ENVOY_VERSION: "1.16.0"
# run integration tests for the connect ca providers
test-connect-ca-providers:
@ -822,7 +820,7 @@ jobs:
# only runs on master: checks latest commit to see if the PR associated has a backport/* or docs* label to cherry-pick
cherry-picker:
docker:
- image: alpine:3.11
- image: docker.mirror.hashicorp.services/alpine:3.12
steps:
- run: apk add --no-cache --no-progress git bash curl ncurses jq openssh-client
- checkout
@ -834,7 +832,7 @@ jobs:
trigger-oss-merge:
docker:
- image: alpine:3.11
- image: docker.mirror.hashicorp.services/alpine:3.12
steps:
- run: apk add --no-cache --no-progress curl jq
- run:
@ -895,6 +893,7 @@ workflows:
branches:
only:
- master
- /release\/\d+\.\d+\.x$/
requires:
- build-static-assets
- dev-build:
@ -928,16 +927,16 @@ workflows:
- nomad-integration-0_8:
requires:
- dev-build
- envoy-integration-test-1.12.6:
- envoy-integration-test-1.13.6:
requires:
- dev-build
- envoy-integration-test-1.13.4:
- envoy-integration-test-1.14.5:
requires:
- dev-build
- envoy-integration-test-1.14.4:
- envoy-integration-test-1.15.2:
requires:
- dev-build
- envoy-integration-test-1.15.0:
- envoy-integration-test-1.16.0:
requires:
- dev-build
website:

View File

@ -131,6 +131,16 @@ fi
# save PR number
pr_number=$(echo "$resp" | jq '.items[].number')
# comment on the PR with the build number to make it easy to re-run the job when
# cherry-pick labels are added in the future
github_message=":cherries: If backport labels were added before merging, cherry-picking will start automatically.\n\nTo retroactively trigger a backport after merging, add backport labels and re-run ${CIRCLE_BUILD_URL}."
curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" \
-X POST \
-d "{ \"body\": \"${github_message}\"}" \
"https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${pr_number}/comments"
# If the API returned a non-zero count, we have found a PR with that commit so we find
# the labels from the PR

View File

@ -70,7 +70,7 @@ theme/testing:
theme/tls:
- '(mtls|mTLS|tls|TLS)'
theme/ui:
- '(ui|browser|chrome|firefox|IE|ie|Chrome)'
- '(browser|chrome|firefox|IE|Chrome)'
theme/windows:
- '(windows|Windows|Microsoft|microsoft)'
# thinking:

View File

@ -1,19 +1,6 @@
# backport/1.6:
# backport/1.7:
# backport/1.8:
# beta/1.8:
# blocks-release:
# crash:
# do-not-merge:
# help-wanted:
# needs-discussion:
# needs-investigation:
# post-beta:
pr/dependencies:
- vendor/**/*
- go.*
# pr/needs-rebase:
# pr/needs-tests:
theme/acls:
- acl/**/*
theme/agent-cache:
@ -65,7 +52,7 @@ theme/testing:
theme/tls:
- tlsutil/**/*
theme/ui:
- ui-v2/**/*
- ui/**/*
# theme/windows:
# thinking:
# type/bug:
@ -74,11 +61,3 @@ type/ci:
# type/crash:
type/docs:
- website/**/*
# type/enhancement:
# type/good-first-issue:
# type/question:
# type/umbrella-☂️:
# version/0.8.3:
# waiting-pr-merge:
# waiting-reply:
# waiting-reply-🤖:

View File

@ -1,15 +0,0 @@
name: "Issue Labeler"
on:
issues:
types: [opened]
jobs:
triage:
runs-on: ubuntu-latest
steps:
- uses: github/issue-labeler@v2.2
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/issue-labeler.yml
not-before: "2020-09-14T08:23:00Z"
enable-versioned-regex: 0

View File

@ -1,5 +1,93 @@
## UNRELEASED
## 1.9.0-rc1 (November 17, 2020)
BREAKING CHANGES:
* connect: Update Envoy metrics names and labels for proxy listeners so that attributes like datacenter and namespace can be extracted. [[GH-9207](https://github.com/hashicorp/consul/issues/9207)]
* server: **(Enterprise only)** Pre-existing intentions defined with
non-existent destination namespaces were non-functional and are erased during
the upgrade process. This should not matter as these intentions had nothing to
enforce. [[GH-9186](https://github.com/hashicorp/consul/issues/9186)]
* server: **(OSS only)** Pre-existing intentions defined with either a source or
destination namespace value that is not "default" are rewritten or deleted
during the upgrade process. Wildcards first attempt to downgrade to "default"
unless an intention already exists, otherwise these non-functional intentions
are deleted. [[GH-9186](https://github.com/hashicorp/consul/issues/9186)]
FEATURES:
* agent: return the default ACL policy to callers as a header [[GH-9101](https://github.com/hashicorp/consul/issues/9101)]
* autopilot: Added a new `consul operator autopilot state` command to retrieve and view the Autopilot state from Consul. [[GH-9142](https://github.com/hashicorp/consul/issues/9142)]
IMPROVEMENTS:
* agent: All metrics should be present and available to Prometheus scrapers when Consul starts. If any non-deprecated metrics are missing, please submit an issue with the metric name. [[GH-9198](https://github.com/hashicorp/consul/issues/9198)]
* server: break up Intention.Apply monolithic method [[GH-9007](https://github.com/hashicorp/consul/issues/9007)]
* server: remove config entry CAS in legacy intention API bridge code [[GH-9151](https://github.com/hashicorp/consul/issues/9151)]
* ui: Add the Upstreams and Exposed Paths tabs for services in mesh [[GH-9141](https://github.com/hashicorp/consul/issues/9141)]
* ui: Moves the Proxy health checks to be displayed with the Service health check under the Health Checks tab [[GH-9141](https://github.com/hashicorp/consul/issues/9141)]
DEPRECATIONS:
* telemetry: the disable_compat_1.9 config will cover more metrics deprecations in future 1.9 point releases. These metrics will be emitted twice for backwards compatibility - if the flag is true, only the new metric name will be written. [[GH-9181](https://github.com/hashicorp/consul/issues/9181)]
BUG FIXES:
* autopilot: Prevent panic when requesting the autopilot health immediately after a leader is elected. [[GH-9204](https://github.com/hashicorp/consul/issues/9204)]
* license: **(Enterprise only)** Fixed an issue where warnings about Namespaces being unlicensed would be emitted erroneously.
* namespace: **(Enterprise Only)** Fixed a bug that could cause snapshot restoration to fail when it contained a namespace marked for deletion while still containing other resources in that namespace. [[GH-9156](https://github.com/hashicorp/consul/issues/9156)]
* namespace: **(Enterprise Only)** Fixed an issue where namespaced services and checks were not being deleted when the containing namespace was deleted.
* server: skip deleted and deleting namespaces when migrating intentions to config entries [[GH-9186](https://github.com/hashicorp/consul/issues/9186)]
## 1.9.0-beta3 (November 10, 2020)
BREAKING CHANGES:
* connect: Switch the default gateway port from 443 to 8443 to avoid the assumption that Envoy runs as root. [[GH-9113](https://github.com/hashicorp/consul/issues/9113)]
* raft: Raft protocol v2 is no longer supported. If currently using protocol v2, an intermediate upgrade to a version supporting both the v2 and v3 protocols (1.0.0 - 1.8.x) will be necessary. Note that the Raft protocol configured with the `raft_protocol` setting and the Consul RPC protocol configured with the `protocol` setting (and output by the `consul version` command) are distinct; supported Consul RPC protocol versions are not altered. [[GH-9103](https://github.com/hashicorp/consul/issues/9103)]
FEATURES:
* autopilot: A new `/v1/operator/autopilot/state` HTTP API was created to give greater visibility into what autopilot is doing and how it has classified all the servers it is tracking. [[GH-9103](https://github.com/hashicorp/consul/issues/9103)]
IMPROVEMENTS:
* autopilot: **(Enterprise Only)** Autopilot now supports using both Redundancy Zones and Automated Upgrades together. [[GH-9103](https://github.com/hashicorp/consul/issues/9103)]
* chore: update to Go 1.14.11 with mitigation for [golang/go#42138](https://github.com/golang/go/issues/42138) [[GH-9119](https://github.com/hashicorp/consul/issues/9119)]
BUG FIXES:
* autopilot: **(Enterprise Only)** Previously servers in other zones would not be promoted when all servers in a second zone had failed. Now the actual behavior matches the docs and autopilot will promote a healthy non-voter from any zone to replace failure of an entire zone. [[GH-9103](https://github.com/hashicorp/consul/issues/9103)]
## 1.9.0-beta2 (November 07, 2020)
BREAKING CHANGES:
* sentinel: **(Consul Enterprise only)** update to v0.16.0, which replaces `whitelist` and `blacklist` with `allowlist` and `denylist`
SECURITY:
* Fix Consul Enterprise Namespace Config Entry Replication DoS. Previously an operator with service:write ACL permissions in a Consul Enterprise cluster could write a malicious config entry that caused infinite raft writes due to issues with the namespace replication logic. [[CVE-2020-25201](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-25201)] [[GH-9024](https://github.com/hashicorp/consul/issues/9024)]
FEATURES:
* agent: Add a new RPC endpoint for streaming cluster state change events to clients.
* telemetry: add initialization and definition for non-expiring key metrics in Prometheus [[GH-9088](https://github.com/hashicorp/consul/issues/9088)]
IMPROVEMENTS:
* agent: add path_allowlist config option to restrict metrics proxy queries [[GH-9059](https://github.com/hashicorp/consul/issues/9059)]
* agent: protect the metrics proxy behind ACLs [[GH-9099](https://github.com/hashicorp/consul/issues/9099)]
* ui: add dashboard_url_template config option for external dashboard links [[GH-9002](https://github.com/hashicorp/consul/issues/9002)]
BUG FIXES:
* api: Fixed a bug where the Check.GRPCUseTLS field could not be set using snake case. [[GH-8771](https://github.com/hashicorp/consul/issues/8771)]
* connect: fix connect sidecars registered via the API not being automatically deregistered with their parent service after an agent restart by persisting the LocallyRegisteredAsSidecar property. [[GH-8924](https://github.com/hashicorp/consul/issues/8924)]
* ui: hide metrics for ingress gateways until full support can be implemented [[GH-9081](https://github.com/hashicorp/consul/issues/9081)]
* ui: only show topology tab for services that exist [[GH-9008](https://github.com/hashicorp/consul/issues/9008)]
## 1.9.0-beta1 (October 12, 2020)
BREAKING CHANGES:
@ -57,6 +145,33 @@ BUG FIXES:
* raft: (Enterprise only) properly update consul server meta non_voter for non-voting Enterprise Consul servers [[GH-8731](https://github.com/hashicorp/consul/issues/8731)]
* ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)]
## 1.8.6 (November 19, 2020)
SECURITY:
* Increase the permissions to read from the `/connect/ca/configuration` endpoint to `operator:write`. Previously Connect CA configuration, including the private key, set via this endpoint could be read back by an operator with `operator:read` privileges. [CVE-2020-28053](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-28053) [[GH-9240](https://github.com/hashicorp/consul/issues/9240)]
## 1.8.5 (October 23, 2020)
SECURITY:
* Fix Consul Enterprise Namespace Config Entry Replication DoS. Previously an operator with service:write ACL permissions in a Consul Enterprise cluster could write a malicious config entry that caused infinite raft writes due to issues with the namespace replication logic. [[CVE-2020-25201](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-25201)] [[GH-9024](https://github.com/hashicorp/consul/issues/9024)]
IMPROVEMENTS:
* api: The `v1/connect/ca/roots` endpoint now accepts a `pem=true` query parameter and will return a PEM encoded certificate chain of all the certificates that would normally be in the JSON version of the response. [[GH-8774](https://github.com/hashicorp/consul/issues/8774)]
* connect: The Vault provider will now automatically renew the lease of the token used, if supported. [[GH-8560](https://github.com/hashicorp/consul/issues/8560)]
* connect: update supported envoy releases to 1.14.5, 1.13.6, 1.12.7, 1.11.2 for 1.8.x [[GH-8999](https://github.com/hashicorp/consul/issues/8999)]
BUG FIXES:
* agent: when enable_central_service_config is enabled ensure agent reload doesn't revert check state to critical [[GH-8747](https://github.com/hashicorp/consul/issues/8747)]
* connect: Fixed an issue where the Vault intermediate was not renewed in the primary datacenter. [[GH-8784](https://github.com/hashicorp/consul/issues/8784)]
* connect: fix Vault provider not respecting IntermediateCertTTL [[GH-8646](https://github.com/hashicorp/consul/issues/8646)]
* connect: fix connect sidecars registered via the API not being automatically deregistered with their parent service after an agent restart by persisting the LocallyRegisteredAsSidecar property. [[GH-8924](https://github.com/hashicorp/consul/issues/8924)]
* fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area` [[GH-8685](https://github.com/hashicorp/consul/issues/8685)]
* ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)]
## 1.8.4 (September 11, 2020)
FEATURES:
@ -202,6 +317,26 @@ BUGFIXES:
* ui: Miscellaneous amends for Safari and Firefox [[GH-7904](https://github.com/hashicorp/consul/issues/7904)] [[GH-7907](https://github.com/hashicorp/consul/pull/7907)]
* ui: Ensure a value is always passed to CONSUL_SSO_ENABLED [[GH-7913](https://github.com/hashicorp/consul/pull/7913)]
## 1.7.10 (November 19, 2020)
SECURITY:
* Increase the permissions to read from the `/connect/ca/configuration` endpoint to `operator:write`. Previously Connect CA configuration, including the private key, set via this endpoint could be read back by an operator with `operator:read` privileges. [CVE-2020-28053](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-28053) [[GH-9240](https://github.com/hashicorp/consul/issues/9240)]
## 1.7.9 (October 26, 2020)
SECURITY:
* Fix Consul Enterprise Namespace Config Entry Replication DoS. Previously an operator with service:write ACL permissions in a Consul Enterprise cluster could write a malicious config entry that caused infinite raft writes due to issues with the namespace replication logic. [[CVE-2020-25201](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-25201)] [[GH-9024](https://github.com/hashicorp/consul/issues/9024)]
IMPROVEMENTS:
* connect: update supported envoy releases to 1.13.6, 1.12.7, 1.11.2, 1.10.0 for 1.7.x [[GH-9000](https://github.com/hashicorp/consul/issues/9000)]
BUG FIXES:
* agent: when enable_central_service_config is enabled ensure agent reload doesn't revert check state to critical [[GH-8747](https://github.com/hashicorp/consul/issues/8747)]
## 1.7.8 (September 11, 2020)
FEATURES:
@ -437,6 +572,12 @@ BUGFIXES:
* ui: Discovery-Chain: Improve parsing of redirects [[GH-7174](https://github.com/hashicorp/consul/pull/7174)]
* ui: Fix styling of duplicate intention error message [[GH-6936](https://github.com/hashicorp/consul/pull/6936)]
## 1.6.10 (November 19, 2020)
SECURITY:
* Increase the permissions to read from the `/connect/ca/configuration` endpoint to `operator:write`. Previously Connect CA configuration, including the private key, set via this endpoint could be read back by an operator with `operator:read` privileges. [CVE-2020-28053](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-28053) [[GH-9240](https://github.com/hashicorp/consul/issues/9240)]
## 1.6.9 (September 11, 2020)
BUG FIXES:

View File

@ -100,6 +100,10 @@ func checkAllowNodeRead(t *testing.T, authz Authorizer, prefix string, entCtx *A
require.Equal(t, Allow, authz.NodeRead(prefix, entCtx))
}
func checkAllowNodeReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.NodeReadAll(entCtx))
}
func checkAllowNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.NodeWrite(prefix, entCtx))
}
@ -124,6 +128,10 @@ func checkAllowServiceRead(t *testing.T, authz Authorizer, prefix string, entCtx
require.Equal(t, Allow, authz.ServiceRead(prefix, entCtx))
}
func checkAllowServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.ServiceReadAll(entCtx))
}
func checkAllowServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.ServiceWrite(prefix, entCtx))
}
@ -204,6 +212,10 @@ func checkDenyNodeRead(t *testing.T, authz Authorizer, prefix string, entCtx *Au
require.Equal(t, Deny, authz.NodeRead(prefix, entCtx))
}
func checkDenyNodeReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.NodeReadAll(entCtx))
}
func checkDenyNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.NodeWrite(prefix, entCtx))
}
@ -228,6 +240,10 @@ func checkDenyServiceRead(t *testing.T, authz Authorizer, prefix string, entCtx
require.Equal(t, Deny, authz.ServiceRead(prefix, entCtx))
}
func checkDenyServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.ServiceReadAll(entCtx))
}
func checkDenyServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.ServiceWrite(prefix, entCtx))
}
@ -308,6 +324,10 @@ func checkDefaultNodeRead(t *testing.T, authz Authorizer, prefix string, entCtx
require.Equal(t, Default, authz.NodeRead(prefix, entCtx))
}
func checkDefaultNodeReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.NodeReadAll(entCtx))
}
func checkDefaultNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.NodeWrite(prefix, entCtx))
}
@ -332,6 +352,10 @@ func checkDefaultServiceRead(t *testing.T, authz Authorizer, prefix string, entC
require.Equal(t, Default, authz.ServiceRead(prefix, entCtx))
}
func checkDefaultServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.ServiceReadAll(entCtx))
}
func checkDefaultServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.ServiceWrite(prefix, entCtx))
}
@ -381,12 +405,14 @@ func TestACL(t *testing.T) {
{name: "DenyKeyringWrite", check: checkDenyKeyringWrite},
{name: "DenyKeyWrite", check: checkDenyKeyWrite},
{name: "DenyNodeRead", check: checkDenyNodeRead},
{name: "DenyNodeReadAll", check: checkDenyNodeReadAll},
{name: "DenyNodeWrite", check: checkDenyNodeWrite},
{name: "DenyOperatorRead", check: checkDenyOperatorRead},
{name: "DenyOperatorWrite", check: checkDenyOperatorWrite},
{name: "DenyPreparedQueryRead", check: checkDenyPreparedQueryRead},
{name: "DenyPreparedQueryWrite", check: checkDenyPreparedQueryWrite},
{name: "DenyServiceRead", check: checkDenyServiceRead},
{name: "DenyServiceReadAll", check: checkDenyServiceReadAll},
{name: "DenyServiceWrite", check: checkDenyServiceWrite},
{name: "DenySessionRead", check: checkDenySessionRead},
{name: "DenySessionWrite", check: checkDenySessionWrite},
@ -411,12 +437,14 @@ func TestACL(t *testing.T) {
{name: "AllowKeyringWrite", check: checkAllowKeyringWrite},
{name: "AllowKeyWrite", check: checkAllowKeyWrite},
{name: "AllowNodeRead", check: checkAllowNodeRead},
{name: "AllowNodeReadAll", check: checkAllowNodeReadAll},
{name: "AllowNodeWrite", check: checkAllowNodeWrite},
{name: "AllowOperatorRead", check: checkAllowOperatorRead},
{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
{name: "AllowPreparedQueryWrite", check: checkAllowPreparedQueryWrite},
{name: "AllowServiceRead", check: checkAllowServiceRead},
{name: "AllowServiceReadAll", check: checkAllowServiceReadAll},
{name: "AllowServiceWrite", check: checkAllowServiceWrite},
{name: "AllowSessionRead", check: checkAllowSessionRead},
{name: "AllowSessionWrite", check: checkAllowSessionWrite},
@ -441,12 +469,14 @@ func TestACL(t *testing.T) {
{name: "AllowKeyringWrite", check: checkAllowKeyringWrite},
{name: "AllowKeyWrite", check: checkAllowKeyWrite},
{name: "AllowNodeRead", check: checkAllowNodeRead},
{name: "AllowNodeReadAll", check: checkAllowNodeReadAll},
{name: "AllowNodeWrite", check: checkAllowNodeWrite},
{name: "AllowOperatorRead", check: checkAllowOperatorRead},
{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
{name: "AllowPreparedQueryWrite", check: checkAllowPreparedQueryWrite},
{name: "AllowServiceRead", check: checkAllowServiceRead},
{name: "AllowServiceReadAll", check: checkAllowServiceReadAll},
{name: "AllowServiceWrite", check: checkAllowServiceWrite},
{name: "AllowSessionRead", check: checkAllowSessionRead},
{name: "AllowSessionWrite", check: checkAllowSessionWrite},
@ -995,6 +1025,7 @@ func TestACL(t *testing.T) {
}),
},
checks: []aclCheck{
{name: "ReadAllDenied", prefix: "", check: checkDenyNodeReadAll},
{name: "DefaultReadDenied", prefix: "nope", check: checkDenyNodeRead},
{name: "DefaultWriteDenied", prefix: "nope", check: checkDenyNodeWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyNodeRead},
@ -1075,6 +1106,7 @@ func TestACL(t *testing.T) {
}),
},
checks: []aclCheck{
{name: "ReadAllDenied", prefix: "", check: checkDenyNodeReadAll},
{name: "DefaultReadAllowed", prefix: "nope", check: checkAllowNodeRead},
{name: "DefaultWriteAllowed", prefix: "nope", check: checkAllowNodeWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyNodeRead},
@ -1335,6 +1367,7 @@ func TestACL(t *testing.T) {
}),
},
checks: []aclCheck{
{name: "ServiceReadAllDenied", prefix: "", check: checkDenyServiceReadAll},
{name: "KeyReadDenied", prefix: "other", check: checkDenyKeyRead},
{name: "KeyWriteDenied", prefix: "other", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "other", check: checkDenyKeyWritePrefix},
@ -1464,6 +1497,7 @@ func TestACL(t *testing.T) {
}),
},
checks: []aclCheck{
{name: "ServiceReadAllDenied", prefix: "", check: checkDenyServiceReadAll},
{name: "KeyReadAllowed", prefix: "other", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "other", check: checkAllowKeyWrite},
{name: "KeyWritePrefixAllowed", prefix: "other", check: checkAllowKeyWritePrefix},
@ -1708,6 +1742,9 @@ func TestACL(t *testing.T) {
},
},
checks: []aclCheck{
{name: "NodeReadAllDenied", prefix: "", check: checkDenyNodeReadAll},
{name: "ServiceReadAllDenied", prefix: "", check: checkDenyServiceReadAll},
{name: "AgentReadPrefixAllowed", prefix: "fo", check: checkAllowAgentRead},
{name: "AgentWritePrefixDenied", prefix: "fo", check: checkDenyAgentWrite},
{name: "AgentReadPrefixAllowed", prefix: "for", check: checkAllowAgentRead},
@ -2101,3 +2138,78 @@ func TestACLEnforce(t *testing.T) {
})
}
}
func TestACL_ReadAll(t *testing.T) {
type testcase struct {
name string
rules string
check func(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext)
}
tests := []testcase{
{
name: "node:bar:read",
rules: `node "bar" { policy = "read" }`,
check: checkDenyNodeReadAll,
},
{
name: "node:bar:write",
rules: `node "bar" { policy = "write" }`,
check: checkDenyNodeReadAll,
},
{
name: "node:*:read",
rules: `node_prefix "" { policy = "read" }`,
check: checkAllowNodeReadAll,
},
{
name: "node:*:write",
rules: `node_prefix "" { policy = "write" }`,
check: checkAllowNodeReadAll,
},
{
name: "service:bar:read",
rules: `service "bar" { policy = "read" }`,
check: checkDenyServiceReadAll,
},
{
name: "service:bar:write",
rules: `service "bar" { policy = "write" }`,
check: checkDenyServiceReadAll,
},
{
name: "service:*:read",
rules: `service_prefix "" { policy = "read" }`,
check: checkAllowServiceReadAll,
},
{
name: "service:*:write",
rules: `service_prefix "" { policy = "write" }`,
check: checkAllowServiceReadAll,
},
}
body := func(t *testing.T, rules string, defaultPolicy Authorizer, check func(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext)) {
t.Helper()
policy, err := NewPolicyFromSource("", 0, rules, SyntaxCurrent, nil, nil)
require.NoError(t, err)
acl, err := NewPolicyAuthorizerWithDefaults(defaultPolicy, []*Policy{policy}, nil)
require.NoError(t, err)
check(t, acl, "", nil)
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Run("default deny", func(t *testing.T) {
body(t, tc.rules, DenyAll(), tc.check)
})
t.Run("default allow", func(t *testing.T) {
body(t, tc.rules, AllowAll(), checkAllowNodeReadAll)
})
})
}
}

View File

@ -107,6 +107,9 @@ type Authorizer interface {
// NodeRead checks for permission to read (discover) a given node.
NodeRead(string, *AuthorizerContext) EnforcementDecision
// NodeReadAll checks for permission to read (discover) all nodes.
NodeReadAll(*AuthorizerContext) EnforcementDecision
// NodeWrite checks for permission to create or update (register) a
// given node.
NodeWrite(string, *AuthorizerContext) EnforcementDecision
@ -130,6 +133,9 @@ type Authorizer interface {
// ServiceRead checks for permission to read a given service
ServiceRead(string, *AuthorizerContext) EnforcementDecision
// ServiceReadAll checks for permission to read all services
ServiceReadAll(*AuthorizerContext) EnforcementDecision
// ServiceWrite checks for permission to create or update a given
// service
ServiceWrite(string, *AuthorizerContext) EnforcementDecision
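
A hedged sketch of how a caller might use the new blanket-read methods to decide whether per-result filtering is needed; the helper below is hypothetical, only the `acl` package types, constructors, and the `NodeReadAll` method come from this diff.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

// canSkipNodeFiltering is a hypothetical helper: if the token can read every
// node, per-result authz.NodeRead(name, ctx) filtering can be skipped.
func canSkipNodeFiltering(authz acl.Authorizer, ctx *acl.AuthorizerContext) bool {
	return authz.NodeReadAll(ctx) == acl.Allow
}

func main() {
	fmt.Println(canSkipNodeFiltering(acl.AllowAll(), nil)) // true
	fmt.Println(canSkipNodeFiltering(acl.DenyAll(), nil))  // false
}
```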

View File

@ -12,6 +12,8 @@ type mockAuthorizer struct {
mock.Mock
}
var _ Authorizer = (*mockAuthorizer)(nil)
// ACLRead checks for permission to list all the ACLs
func (m *mockAuthorizer) ACLRead(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
@ -115,6 +117,11 @@ func (m *mockAuthorizer) NodeRead(segment string, ctx *AuthorizerContext) Enforc
return ret.Get(0).(EnforcementDecision)
}
func (m *mockAuthorizer) NodeReadAll(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}
// NodeWrite checks for permission to create or update (register) a
// given node.
func (m *mockAuthorizer) NodeWrite(segment string, ctx *AuthorizerContext) EnforcementDecision {
@ -156,6 +163,11 @@ func (m *mockAuthorizer) ServiceRead(segment string, ctx *AuthorizerContext) Enf
return ret.Get(0).(EnforcementDecision)
}
func (m *mockAuthorizer) ServiceReadAll(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}
// ServiceWrite checks for permission to create or update a given
// service
func (m *mockAuthorizer) ServiceWrite(segment string, ctx *AuthorizerContext) EnforcementDecision {
@ -183,8 +195,6 @@ func (m *mockAuthorizer) Snapshot(ctx *AuthorizerContext) EnforcementDecision {
}
func TestACL_Enforce(t *testing.T) {
t.Parallel()
type testCase struct {
method string
resource Resource

View File

@ -152,6 +152,12 @@ func (c *ChainedAuthorizer) NodeRead(node string, entCtx *AuthorizerContext) Enf
})
}
func (c *ChainedAuthorizer) NodeReadAll(entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
return authz.NodeReadAll(entCtx)
})
}
// NodeWrite checks for permission to create or update (register) a
// given node.
func (c *ChainedAuthorizer) NodeWrite(node string, entCtx *AuthorizerContext) EnforcementDecision {
@ -199,6 +205,12 @@ func (c *ChainedAuthorizer) ServiceRead(name string, entCtx *AuthorizerContext)
})
}
func (c *ChainedAuthorizer) ServiceReadAll(entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
return authz.ServiceReadAll(entCtx)
})
}
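
A hedged usage sketch for the chained delegation above, using the `NewChainedAuthorizer` constructor seen in the tests later in this diff; the first non-Default answer in the chain wins.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

func main() {
	// The chain asks each authorizer in order; the first answer other than
	// Default decides (see the "First Found" test case in this diff).
	chain := acl.NewChainedAuthorizer([]acl.Authorizer{acl.DenyAll(), acl.AllowAll()})

	// DenyAll answers Deny, so AllowAll is never consulted.
	fmt.Println(chain.NodeReadAll(nil) == acl.Deny) // true
}
```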
// ServiceWrite checks for permission to create or update a given
// service
func (c *ChainedAuthorizer) ServiceWrite(name string, entCtx *AuthorizerContext) EnforcementDecision {

View File

@ -6,6 +6,8 @@ import (
type testAuthorizer EnforcementDecision
var _ Authorizer = testAuthorizer(Allow)
func (authz testAuthorizer) ACLRead(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
@ -54,6 +56,9 @@ func (authz testAuthorizer) KeyringWrite(*AuthorizerContext) EnforcementDecision
func (authz testAuthorizer) NodeRead(string, *AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) NodeReadAll(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) NodeWrite(string, *AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
@ -72,6 +77,9 @@ func (authz testAuthorizer) PreparedQueryWrite(string, *AuthorizerContext) Enfor
func (authz testAuthorizer) ServiceRead(string, *AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) ServiceReadAll(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
@ -86,11 +94,7 @@ func (authz testAuthorizer) Snapshot(*AuthorizerContext) EnforcementDecision {
}
func TestChainedAuthorizer(t *testing.T) {
t.Parallel()
t.Run("No Authorizers", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{})
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -121,8 +125,6 @@ func TestChainedAuthorizer(t *testing.T) {
})
t.Run("Authorizer Defaults", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{testAuthorizer(Default)})
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -153,8 +155,6 @@ func TestChainedAuthorizer(t *testing.T) {
})
t.Run("Authorizer No Defaults", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{testAuthorizer(Allow)})
checkAllowACLRead(t, authz, "foo", nil)
checkAllowACLWrite(t, authz, "foo", nil)
@ -185,8 +185,6 @@ func TestChainedAuthorizer(t *testing.T) {
})
t.Run("First Found", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{testAuthorizer(Deny), testAuthorizer(Allow)})
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)

View File

@ -350,7 +350,7 @@ type enforceCallback func(raw interface{}, prefixOnly bool) EnforcementDecision
func anyAllowed(tree *radix.Tree, enforceFn enforceCallback) EnforcementDecision {
decision := Default
// special case for handling a catch-all prefix rule. If the rule woul Deny access then our default decision
// special case for handling a catch-all prefix rule. If the rule would Deny access then our default decision
// should be to Deny, but this decision should still be overridable with other more specific rules.
if raw, found := tree.Get(""); found {
decision = enforceFn(raw, true)
@ -686,6 +686,10 @@ func (p *policyAuthorizer) NodeRead(name string, _ *AuthorizerContext) Enforceme
return Default
}
func (p *policyAuthorizer) NodeReadAll(_ *AuthorizerContext) EnforcementDecision {
return p.allAllowed(p.nodeRules, AccessRead)
}
// NodeWrite checks if writing (registering) a node is allowed
func (p *policyAuthorizer) NodeWrite(name string, _ *AuthorizerContext) EnforcementDecision {
if rule, ok := getPolicy(name, p.nodeRules); ok {
@ -720,6 +724,10 @@ func (p *policyAuthorizer) ServiceRead(name string, _ *AuthorizerContext) Enforc
return Default
}
func (p *policyAuthorizer) ServiceReadAll(_ *AuthorizerContext) EnforcementDecision {
return p.allAllowed(p.serviceRules, AccessRead)
}
// ServiceWrite checks if writing (registering) a service is allowed
func (p *policyAuthorizer) ServiceWrite(name string, _ *AuthorizerContext) EnforcementDecision {
if rule, ok := getPolicy(name, p.serviceRules); ok {

View File

@ -13,8 +13,6 @@ import (
// ensure compatibility from version to version those tests have been only minimally altered. The tests in this
// file are specific to the newer functionality.
func TestPolicyAuthorizer(t *testing.T) {
t.Parallel()
type aclCheck struct {
name string
prefix string
@ -446,8 +444,6 @@ func TestPolicyAuthorizer(t *testing.T) {
name := name
tcase := tcase
t.Run(name, func(t *testing.T) {
t.Parallel()
authz, err := NewPolicyAuthorizer([]*Policy{tcase.policy}, nil)
require.NoError(t, err)
@ -458,7 +454,6 @@ func TestPolicyAuthorizer(t *testing.T) {
}
t.Run(checkName, func(t *testing.T) {
check := check
t.Parallel()
check.check(t, authz, check.prefix, nil)
})
@ -468,8 +463,6 @@ func TestPolicyAuthorizer(t *testing.T) {
}
func TestAnyAllowed(t *testing.T) {
t.Parallel()
type radixInsertion struct {
segment string
value *policyAuthorizerRadixLeaf
@ -719,8 +712,6 @@ func TestAnyAllowed(t *testing.T) {
}
func TestAllAllowed(t *testing.T) {
t.Parallel()
type radixInsertion struct {
segment string
value *policyAuthorizerRadixLeaf

View File

@ -142,6 +142,13 @@ func (s *staticAuthorizer) NodeRead(string, *AuthorizerContext) EnforcementDecis
return Deny
}
func (s *staticAuthorizer) NodeReadAll(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) NodeWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
@ -184,6 +191,13 @@ func (s *staticAuthorizer) ServiceRead(string, *AuthorizerContext) EnforcementDe
return Deny
}
func (s *staticAuthorizer) ServiceReadAll(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow

View File

@ -5,11 +5,7 @@ import (
)
func TestStaticAuthorizer(t *testing.T) {
t.Parallel()
t.Run("AllowAll", func(t *testing.T) {
t.Parallel()
authz := AllowAll()
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -40,7 +36,6 @@ func TestStaticAuthorizer(t *testing.T) {
})
t.Run("DenyAll", func(t *testing.T) {
t.Parallel()
authz := DenyAll()
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -71,7 +66,6 @@ func TestStaticAuthorizer(t *testing.T) {
})
t.Run("ManageAll", func(t *testing.T) {
t.Parallel()
authz := ManageAll()
checkAllowACLRead(t, authz, "foo", nil)
checkAllowACLWrite(t, authz, "foo", nil)

View File

@ -360,10 +360,16 @@ func New(bd BaseDeps) (*Agent, error) {
}
cacheName := cachetype.HealthServicesName
if bd.RuntimeConfig.CacheUseStreamingBackend {
if bd.RuntimeConfig.UseStreamingBackend {
cacheName = cachetype.StreamingHealthServicesName
}
a.rpcClientHealth = &health.Client{Cache: bd.Cache, NetRPC: &a, CacheName: cacheName}
a.rpcClientHealth = &health.Client{
Cache: bd.Cache,
NetRPC: &a,
CacheName: cacheName,
// Temporarily until streaming supports all connect events
CacheNameConnect: cachetype.HealthServicesName,
}
a.serviceManager = NewServiceManager(&a)
@ -792,19 +798,7 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
httpServer.ConnState = connLimitFn
}
servers = append(servers, apiServer{
Protocol: proto,
Addr: l.Addr(),
Shutdown: httpServer.Shutdown,
Run: func() error {
err := httpServer.Serve(l)
if err == nil || err == http.ErrServerClosed {
return nil
}
return fmt.Errorf("%s server %s failed: %w", proto, l.Addr(), err)
},
MaxHeaderBytes: a.config.HTTPMaxHeaderBytes,
})
servers = append(servers, newAPIServerHTTP(proto, l, httpServer))
}
return nil
}
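
The removed lines above carried `MaxHeaderBytes: a.config.HTTPMaxHeaderBytes`; below is a minimal sketch of applying that limit directly on the `net/http` server. Whether the merged code sets it this way or wires it through `newAPIServerHTTP` is not shown in this hunk, so treat this as an assumption.

```go
// Sketch only: apply the configured request-header limit on the net/http
// server before it is wrapped into an apiServer. http.Server.MaxHeaderBytes
// is the standard library field; HTTPMaxHeaderBytes is the config value
// referenced in the removed lines above.
if a.config.HTTPMaxHeaderBytes > 0 {
	httpServer.MaxHeaderBytes = a.config.HTTPMaxHeaderBytes
}
```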
@ -1106,8 +1100,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
if runtimeCfg.SessionTTLMin != 0 {
cfg.SessionTTLMin = runtimeCfg.SessionTTLMin
}
if runtimeCfg.NonVotingServer {
cfg.NonVoter = runtimeCfg.NonVotingServer
if runtimeCfg.ReadReplica {
cfg.ReadReplica = runtimeCfg.ReadReplica
}
// These are fully specified in the agent defaults, so we can simply
@ -1723,6 +1717,11 @@ type persistedService struct {
Token string
Service *structs.NodeService
Source string
// whether this service was registered as a sidecar, see structs.NodeService
// we store this field here because it is excluded from json serialization
// to exclude it from API output, but we need it to properly deregister
// persisted sidecars.
LocallyRegisteredAsSidecar bool `json:",omitempty"`
}
// persistService saves a service definition to a JSON file in the data dir
@ -1731,9 +1730,10 @@ func (a *Agent) persistService(service *structs.NodeService, source configSource
svcPath := filepath.Join(a.config.DataDir, servicesDir, svcID.StringHash())
wrapped := persistedService{
Token: a.State.ServiceToken(service.CompoundServiceID()),
Service: service,
Source: source.String(),
Token: a.State.ServiceToken(service.CompoundServiceID()),
Service: service,
Source: source.String(),
LocallyRegisteredAsSidecar: service.LocallyRegisteredAsSidecar,
}
encoded, err := json.Marshal(wrapped)
if err != nil {
@ -3182,6 +3182,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
continue
}
}
// Restore LocallyRegisteredAsSidecar, see persistedService.LocallyRegisteredAsSidecar
p.Service.LocallyRegisteredAsSidecar = p.LocallyRegisteredAsSidecar
serviceID := p.Service.CompoundServiceID()
source, ok := ConfigSourceFromName(p.Source)
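
A small self-contained Go sketch of the on-disk shape produced by `persistService` after this change; field values are illustrative and `Service` is simplified to a map to keep the example standalone.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for the persistedService wrapper shown above; Service is reduced
// to a map here only to keep the example self-contained.
type persistedService struct {
	Token                      string
	Service                    map[string]interface{}
	Source                     string
	LocallyRegisteredAsSidecar bool `json:",omitempty"`
}

func main() {
	out, err := json.MarshalIndent(persistedService{
		Token:                      "example-token",
		Service:                    map[string]interface{}{"ID": "web-sidecar-proxy"},
		Source:                     "remote",
		LocallyRegisteredAsSidecar: true,
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	// loadServices copies LocallyRegisteredAsSidecar back onto the restored
	// NodeService so the sidecar is deregistered correctly after a restart.
	fmt.Println(string(out))
}
```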

View File

@ -136,7 +136,7 @@ func (s *HTTPHandlers) AgentMetrics(resp http.ResponseWriter, req *http.Request)
return nil, acl.ErrPermissionDenied
}
if enablePrometheusOutput(req) {
if s.agent.config.Telemetry.PrometheusRetentionTime < 1 {
if s.agent.config.Telemetry.PrometheusOpts.Expiration < 1 {
resp.WriteHeader(http.StatusUnsupportedMediaType)
fmt.Fprint(resp, "Prometheus is not enabled since its retention time is not positive")
return nil, nil
@ -170,7 +170,7 @@ func (s *HTTPHandlers) AgentReload(resp http.ResponseWriter, req *http.Request)
return nil, s.agent.ReloadConfig()
}
func buildAgentService(s *structs.NodeService) api.AgentService {
func buildAgentService(s *structs.NodeService, dc string) api.AgentService {
weights := api.AgentWeights{Passing: 1, Warning: 1}
if s.Weights != nil {
if s.Weights.Passing > 0 {
@ -200,6 +200,7 @@ func buildAgentService(s *structs.NodeService) api.AgentService {
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
Weights: weights,
Datacenter: dc,
}
if as.Tags == nil {
@ -253,9 +254,11 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
// anyway.
agentSvcs := make(map[string]*api.AgentService)
dc := s.agent.config.Datacenter
// Use empty list instead of nil
for id, s := range services {
agentService := buildAgentService(s)
agentService := buildAgentService(s, dc)
agentSvcs[id.ID] = &agentService
}
@ -303,6 +306,8 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
sid := structs.NewServiceID(id, &entMeta)
dc := s.agent.config.Datacenter
resultHash, service, err := s.agent.LocalBlockingQuery(false, hash, queryOpts.MaxQueryTime,
func(ws memdb.WatchSet) (string, interface{}, error) {
@ -330,7 +335,7 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
}
// Calculate the content hash over the response, minus the hash field
aSvc := buildAgentService(svc)
aSvc := buildAgentService(svc, dc)
reply := &aSvc
rawHash, err := hashstructure.Hash(reply, nil)
@ -768,6 +773,8 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
sid := structs.NewServiceID(serviceID, &entMeta)
dc := s.agent.config.Datacenter
if service := s.agent.State.Service(sid); service != nil {
if authz != nil && authz.ServiceRead(service.Service, &authzContext) != acl.Allow {
return nil, acl.ErrPermissionDenied
@ -776,7 +783,7 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
if returnTextPlain(req) {
return status, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "text/plain"}
}
serviceInfo := buildAgentService(service)
serviceInfo := buildAgentService(service, dc)
result := &api.AgentServiceChecksInfo{
AggregatedStatus: status,
Checks: healthChecks,
@ -822,6 +829,8 @@ func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *h
return nil, acl.ErrPermissionDenied
}
dc := s.agent.config.Datacenter
code := http.StatusNotFound
status := fmt.Sprintf("ServiceName %s Not Found", serviceName)
services := s.agent.State.Services(&entMeta)
@ -831,7 +840,7 @@ func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *h
sid := structs.NewServiceID(service.ID, &entMeta)
scode, sstatus, healthChecks := agentHealthService(sid, s)
serviceInfo := buildAgentService(service)
serviceInfo := buildAgentService(service, dc)
res := api.AgentServiceChecksInfo{
AggregatedStatus: sstatus,
Checks: healthChecks,

View File

@ -365,8 +365,9 @@ func TestAgent_Service(t *testing.T) {
Passing: 1,
Warning: 1,
},
Meta: map[string]string{},
Tags: []string{},
Meta: map[string]string{},
Tags: []string{},
Datacenter: "dc1",
}
fillAgentServiceEnterpriseMeta(expectedResponse, structs.DefaultEnterpriseMeta())
@ -391,8 +392,9 @@ func TestAgent_Service(t *testing.T) {
Port: 1818,
},
},
Meta: map[string]string{},
Tags: []string{},
Meta: map[string]string{},
Tags: []string{},
Datacenter: "dc1",
}
fillAgentServiceEnterpriseMeta(expectWebResponse, structs.DefaultEnterpriseMeta())

View File

@ -13,6 +13,7 @@ import (
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"strconv"
@ -23,12 +24,23 @@ import (
"github.com/golang/protobuf/jsonpb"
"github.com/google/tcpproxy"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/serf/coordinate"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
"gopkg.in/square/go-jose.v2/jwt"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/checks"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest"
"github.com/hashicorp/consul/ipaddr"
@ -38,13 +50,8 @@ import (
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/serf/coordinate"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
"gopkg.in/square/go-jose.v2/jwt"
)
func getService(a *TestAgent, id string) *structs.NodeService {
@ -2499,6 +2506,75 @@ func TestAgent_PurgeCheckOnDuplicate(t *testing.T) {
require.Equal(t, expected, result)
}
func TestAgent_DeregisterPersistedSidecarAfterRestart(t *testing.T) {
t.Parallel()
nodeID := NodeID()
a := StartTestAgent(t, TestAgent{
HCL: `
node_id = "` + nodeID + `"
node_name = "Node ` + nodeID + `"
server = false
bootstrap = false
enable_central_service_config = false
`})
defer a.Shutdown()
srv := &structs.NodeService{
ID: "svc",
Service: "svc",
Weights: &structs.Weights{
Passing: 2,
Warning: 1,
},
Tags: []string{"tag2"},
Port: 8200,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
Connect: structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{},
},
}
connectSrv, _, _, err := a.sidecarServiceFromNodeService(srv, "")
require.NoError(t, err)
// First persist both the service and its sidecar
err = a.AddService(srv, nil, true, "", ConfigSourceLocal)
require.NoError(t, err)
err = a.AddService(connectSrv, nil, true, "", ConfigSourceLocal)
require.NoError(t, err)
// check both services were registered
require.NotNil(t, a.State.Service(srv.CompoundServiceID()))
require.NotNil(t, a.State.Service(connectSrv.CompoundServiceID()))
a.Shutdown()
// Start again with the same data dir so the persisted services are reloaded
a2 := StartTestAgent(t, TestAgent{
Name: "Agent2",
DataDir: a.DataDir,
HCL: `
node_id = "` + nodeID + `"
node_name = "Node ` + nodeID + `"
server = false
bootstrap = false
enable_central_service_config = false
`})
defer a2.Shutdown()
// check both services were restored
require.NotNil(t, a2.State.Service(srv.CompoundServiceID()))
require.NotNil(t, a2.State.Service(connectSrv.CompoundServiceID()))
err = a2.RemoveService(srv.CompoundServiceID())
require.NoError(t, err)
// check both services were deregistered
require.Nil(t, a2.State.Service(srv.CompoundServiceID()))
require.Nil(t, a2.State.Service(connectSrv.CompoundServiceID()))
}
func TestAgent_loadChecks_token(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, `
@ -4693,3 +4769,68 @@ func TestSharedRPCRouter(t *testing.T) {
require.NotNil(t, mgr)
require.NotNil(t, server)
}
func TestAgent_ListenHTTP_MultipleAddresses(t *testing.T) {
ports, err := freeport.Take(2)
require.NoError(t, err)
t.Cleanup(func() { freeport.Return(ports) })
caConfig := tlsutil.Config{}
tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil))
require.NoError(t, err)
bd := BaseDeps{
Deps: consul.Deps{
Logger: hclog.NewInterceptLogger(nil),
Tokens: new(token.Store),
TLSConfigurator: tlsConf,
},
RuntimeConfig: &config.RuntimeConfig{
HTTPAddrs: []net.Addr{
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
},
},
Cache: cache.New(cache.Options{}),
}
agent, err := New(bd)
require.NoError(t, err)
srvs, err := agent.listenHTTP()
require.NoError(t, err)
defer func() {
ctx := context.Background()
for _, srv := range srvs {
srv.Shutdown(ctx)
}
}()
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
t.Cleanup(cancel)
g := new(errgroup.Group)
for _, s := range srvs {
g.Go(s.Run)
}
require.Len(t, srvs, 2)
require.Len(t, uniqueAddrs(srvs), 2)
client := &http.Client{}
for _, s := range srvs {
u := url.URL{Scheme: s.Protocol, Host: s.Addr.String()}
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
require.NoError(t, err)
resp, err := client.Do(req.WithContext(ctx))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
}
}
func uniqueAddrs(srvs []apiServer) map[string]struct{} {
result := make(map[string]struct{}, len(srvs))
for _, s := range srvs {
result[s.Addr.String()] = struct{}{}
}
return result
}

View File

@ -2,7 +2,9 @@ package agent
import (
"context"
"fmt"
"net"
"net/http"
"sync"
"time"
@ -37,8 +39,6 @@ type apiServer struct {
Run func() error
// Shutdown function used to stop the server
Shutdown func(context.Context) error
MaxHeaderBytes int
}
// NewAPIServers returns an empty apiServers that is ready to Start servers.
@ -94,3 +94,18 @@ func (s *apiServers) Shutdown(ctx context.Context) {
func (s *apiServers) WaitForShutdown() error {
return s.group.Wait()
}
func newAPIServerHTTP(proto string, l net.Listener, httpServer *http.Server) apiServer {
return apiServer{
Protocol: proto,
Addr: l.Addr(),
Shutdown: httpServer.Shutdown,
Run: func() error {
err := httpServer.Serve(l)
if err == nil || err == http.ErrServerClosed {
return nil
}
return fmt.Errorf("%s server %s failed: %w", proto, l.Addr(), err)
},
}
}
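The Run closure treats http.ErrServerClosed as a clean exit because that is what Serve returns after a graceful Shutdown. A self-contained sketch of the same pattern using only the standard library and errgroup follows; the handler, timings, and error text are illustrative, not taken from the agent code.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})}

	// Mirrors the Run closure above: ErrServerClosed is the normal result of a
	// graceful Shutdown, so it is not reported as a failure.
	run := func() error {
		if err := srv.Serve(l); err != nil && err != http.ErrServerClosed {
			return fmt.Errorf("http server %s failed: %w", l.Addr(), err)
		}
		return nil
	}

	var g errgroup.Group
	g.Go(run)

	// Give Serve a moment to start, then shut down gracefully.
	time.Sleep(100 * time.Millisecond)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		panic(err)
	}

	// Wait returns nil because Serve exited with http.ErrServerClosed.
	fmt.Println("run result:", g.Wait())
}
```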

View File

@ -634,6 +634,7 @@ type testAutoConfig struct {
ac *AutoConfig
tokenUpdates chan struct{}
originalToken string
stop func()
initialRoots *structs.IndexedCARoots
initialCert *structs.IssuedCert
@ -835,6 +836,7 @@ func startedAutoConfig(t *testing.T, autoEncrypt bool) testAutoConfig {
initialRoots: indexedRoots,
initialCert: cert,
extraCerts: extraCerts,
stop: cancel,
}
}
@ -1098,16 +1100,15 @@ func TestFallback(t *testing.T) {
// now wait for the fallback routine to be invoked
require.True(t, waitForChans(100*time.Millisecond, fallbackCtx.Done()), "fallback routines did not get invoked within the allotted time")
// persisting these to disk happens after the RPC we waited on above will have fired
// There is no deterministic way to know once it's been written so we wrap this in a retry.
testretry.Run(t, func(r *testretry.R) {
resp, err := testAC.ac.readPersistedAutoConfig()
require.NoError(r, err)
testAC.stop()
<-testAC.ac.done
// ensure the roots got persisted to disk
require.Equal(r, thirdCert.CertPEM, resp.Certificate.GetCertPEM())
require.Equal(r, secondRoots.ActiveRootID, resp.CARoots.GetActiveRootID())
})
resp, err := testAC.ac.readPersistedAutoConfig()
require.NoError(t, err)
// ensure the roots got persisted to disk
require.Equal(t, thirdCert.CertPEM, resp.Certificate.GetCertPEM())
require.Equal(t, secondRoots.ActiveRootID, resp.CARoots.GetActiveRootID())
}
func TestIntroToken(t *testing.T) {

View File

@ -9,17 +9,15 @@ import (
"github.com/hashicorp/consul/types"
)
func newEndOfSnapshotEvent(topic pbsubscribe.Topic, index uint64) *pbsubscribe.Event {
func newEndOfSnapshotEvent(index uint64) *pbsubscribe.Event {
return &pbsubscribe.Event{
Topic: topic,
Index: index,
Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true},
}
}
func newNewSnapshotToFollowEvent(topic pbsubscribe.Topic) *pbsubscribe.Event {
func newNewSnapshotToFollowEvent() *pbsubscribe.Event {
return &pbsubscribe.Event{
Topic: topic,
Payload: &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true},
}
}
@ -37,8 +35,6 @@ func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsub
addr := fmt.Sprintf("10.10.%d.%d", nodeNum/256, nodeNum%256)
return &pbsubscribe.Event{
Topic: pbsubscribe.Topic_ServiceHealth,
Key: svc,
Index: index,
Payload: &pbsubscribe.Event_ServiceHealth{
ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
@ -117,8 +113,6 @@ func newEventServiceHealthDeregister(index uint64, nodeNum int, svc string) *pbs
node := fmt.Sprintf("node%d", nodeNum)
return &pbsubscribe.Event{
Topic: pbsubscribe.Topic_ServiceHealth,
Key: svc,
Index: index,
Payload: &pbsubscribe.Event_ServiceHealth{
ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
@ -164,7 +158,6 @@ func newEventBatchWithEvents(first *pbsubscribe.Event, evs ...*pbsubscribe.Event
events[i+1] = evs[i]
}
return &pbsubscribe.Event{
Topic: first.Topic,
Index: first.Index,
Payload: &pbsubscribe.Event_EventBatch{
EventBatch: &pbsubscribe.EventBatch{Events: events},

View File

@ -3,6 +3,7 @@ package cachetype
import (
"context"
"fmt"
"reflect"
"time"
"github.com/hashicorp/go-bexpr"
@ -72,7 +73,7 @@ func (c *StreamingHealthServices) Fetch(opts cache.FetchOptions, req cache.Reque
Token: srvReq.Token,
Datacenter: srvReq.Datacenter,
Index: index,
// TODO(streaming): set Namespace from srvReq.EnterpriseMeta.Namespace
Namespace: srvReq.EnterpriseMeta.GetNamespace(),
}
if srvReq.Connect {
req.Topic = pbsubscribe.Topic_ServiceHealthConnect
@ -138,15 +139,14 @@ func (s *streamingHealthState) Fetch(opts cache.FetchOptions) (cache.FetchResult
}
func newHealthView(filterExpr string) (*healthView, error) {
s := &healthView{state: make(map[string]structs.CheckServiceNode)}
// We apply filtering to the raw CheckServiceNodes before we are done mutating
// state in Update to save from storing stuff in memory we'll only filter
// later. Because the state is just a map of those types, we can simply run
// that map through filter and it will remove any entries that don't match.
var err error
s.filter, err = bexpr.CreateFilter(filterExpr, nil, s.state)
return s, err
fe, err := newFilterEvaluator(filterExpr)
if err != nil {
return nil, err
}
return &healthView{
state: make(map[string]structs.CheckServiceNode),
filter: fe,
}, nil
}
// healthView implements submatview.View for storing the view state
@ -156,7 +156,7 @@ func newHealthView(filterExpr string) (*healthView, error) {
// involves re-sorting each time etc. though.
type healthView struct {
state map[string]structs.CheckServiceNode
filter *bexpr.Filter
filter filterEvaluator
}
// Update implements View
@ -171,24 +171,41 @@ func (s *healthView) Update(events []*pbsubscribe.Event) error {
id := serviceHealth.CheckServiceNode.UniqueID()
switch serviceHealth.Op {
case pbsubscribe.CatalogOp_Register:
csn := pbservice.CheckServiceNodeToStructs(serviceHealth.CheckServiceNode)
s.state[id] = *csn
csn := *pbservice.CheckServiceNodeToStructs(serviceHealth.CheckServiceNode)
passed, err := s.filter.Evaluate(csn)
switch {
case err != nil:
return err
case passed:
s.state[id] = csn
}
case pbsubscribe.CatalogOp_Deregister:
delete(s.state, id)
}
}
// TODO(streaming): should this filter be applied to only the new CheckServiceNode
// instead of the full map, which should already be filtered.
if s.filter != nil {
filtered, err := s.filter.Execute(s.state)
if err != nil {
return err
}
s.state = filtered.(map[string]structs.CheckServiceNode)
}
return nil
}
type filterEvaluator interface {
Evaluate(datum interface{}) (bool, error)
}
func newFilterEvaluator(expr string) (filterEvaluator, error) {
if expr == "" {
return noopFilterEvaluator{}, nil
}
return bexpr.CreateEvaluatorForType(expr, nil, reflect.TypeOf(structs.CheckServiceNode{}))
}
// noopFilterEvaluator may be used in place of a bexpr.Evaluator. The Evaluate
// method always returns true, so no items will be filtered out.
type noopFilterEvaluator struct{}
func (noopFilterEvaluator) Evaluate(_ interface{}) (bool, error) {
return true, nil
}
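With this change the view filters each CheckServiceNode as it is registered instead of re-running the filter over the whole map, and the no-op evaluator keeps everything when no filter expression is configured. Here is a standard-library-only sketch of that per-item pattern; nodeNameFilter is an illustrative stand-in for a bexpr evaluator, not part of Consul.

```go
package main

import "fmt"

// filterEvaluator mirrors the interface above: it decides per item whether
// the item should be kept in the view state.
type filterEvaluator interface {
	Evaluate(datum interface{}) (bool, error)
}

// noopFilterEvaluator keeps every item (used when no filter expression is set).
type noopFilterEvaluator struct{}

func (noopFilterEvaluator) Evaluate(interface{}) (bool, error) { return true, nil }

// nodeNameFilter is an illustrative stand-in for a bexpr evaluator that only
// keeps items from a particular node.
type nodeNameFilter struct{ node string }

func (f nodeNameFilter) Evaluate(datum interface{}) (bool, error) {
	item, ok := datum.(map[string]string)
	if !ok {
		return false, fmt.Errorf("unexpected type %T", datum)
	}
	return item["Node"] == f.node, nil
}

func main() {
	state := map[string]map[string]string{}
	var filter filterEvaluator = nodeNameFilter{node: "node2"}

	// Apply the filter as each "register" event arrives, instead of
	// re-filtering the whole map after every batch of updates.
	for id, item := range map[string]map[string]string{
		"a": {"Node": "node1"}, "b": {"Node": "node2"},
	} {
		passed, err := filter.Evaluate(item)
		if err != nil {
			panic(err)
		}
		if passed {
			state[id] = item
		}
	}
	fmt.Println(len(state)) // 1: only the node2 entry is stored
}
```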
// Result returns the structs.IndexedCheckServiceNodes stored by this view.
func (s *healthView) Result(index uint64) (interface{}, error) {
result := structs.IndexedCheckServiceNodes{

View File

@ -14,11 +14,13 @@ import (
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbcommon"
"github.com/hashicorp/consul/proto/pbsubscribe"
)
func TestStreamingHealthServices_EmptySnapshot(t *testing.T) {
client := NewTestStreamingClient()
namespace := pbcommon.DefaultEnterpriseMeta.Namespace
client := NewTestStreamingClient(namespace)
typ := StreamingHealthServices{deps: MaterializerDeps{
Client: client,
Logger: hclog.Default(),
@ -26,15 +28,16 @@ func TestStreamingHealthServices_EmptySnapshot(t *testing.T) {
// Initially there are no services registered. Server should send an
// EndOfSnapshot message immediately with index of 1.
client.QueueEvents(newEndOfSnapshotEvent(pbsubscribe.Topic_ServiceHealth, 1))
client.QueueEvents(newEndOfSnapshotEvent(1))
opts := cache.FetchOptions{
MinIndex: 0,
Timeout: time.Second,
}
req := &structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "web",
Datacenter: "dc1",
ServiceName: "web",
EnterpriseMeta: structs.EnterpriseMetaInitializer(namespace),
}
empty := &structs.IndexedCheckServiceNodes{
Nodes: structs.CheckServiceNodes{},
@ -215,8 +218,17 @@ func requireResultsSame(t *testing.T, want, got *structs.IndexedCheckServiceNode
require.ElementsMatch(t, wantIDs, gotIDs)
}
// getNamespace returns a namespace if namespace support exists, otherwise
// returns the empty string. It allows the same tests to work in both oss and ent
// without duplicating the tests.
func getNamespace(ns string) string {
meta := structs.EnterpriseMetaInitializer(ns)
return meta.GetNamespace()
}
func TestStreamingHealthServices_FullSnapshot(t *testing.T) {
client := NewTestStreamingClient()
namespace := getNamespace("ns2")
client := NewTestStreamingClient(namespace)
typ := StreamingHealthServices{deps: MaterializerDeps{
Client: client,
Logger: hclog.Default(),
@ -230,7 +242,7 @@ func TestStreamingHealthServices_FullSnapshot(t *testing.T) {
registerServiceWeb(5, 1),
registerServiceWeb(5, 2),
registerServiceWeb(5, 3),
newEndOfSnapshotEvent(pbsubscribe.Topic_ServiceHealth, 5))
newEndOfSnapshotEvent(5))
// This contains the view state, so it's important we share it between calls.
opts := cache.FetchOptions{
@ -238,8 +250,9 @@ func TestStreamingHealthServices_FullSnapshot(t *testing.T) {
Timeout: 1 * time.Second,
}
req := &structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "web",
Datacenter: "dc1",
ServiceName: "web",
EnterpriseMeta: structs.EnterpriseMetaInitializer(namespace),
}
gatherNodes := func(res interface{}) []string {
@ -301,7 +314,7 @@ func TestStreamingHealthServices_FullSnapshot(t *testing.T) {
registerServiceWeb(50, 3), // overlap existing node
registerServiceWeb(50, 4),
registerServiceWeb(50, 5),
newEndOfSnapshotEvent(pbsubscribe.Topic_ServiceHealth, 50))
newEndOfSnapshotEvent(50))
// Make another blocking query with THE SAME index. It should immediately
// return the new snapshot.
@ -324,11 +337,11 @@ func TestStreamingHealthServices_FullSnapshot(t *testing.T) {
client.QueueErr(tempError("temporary connection error"))
client.QueueEvents(
newNewSnapshotToFollowEvent(pbsubscribe.Topic_ServiceHealth),
newNewSnapshotToFollowEvent(),
registerServiceWeb(50, 3), // overlap existing node
registerServiceWeb(50, 4),
registerServiceWeb(50, 5),
newEndOfSnapshotEvent(pbsubscribe.Topic_ServiceHealth, 50))
newEndOfSnapshotEvent(50))
start := time.Now()
opts.MinIndex = 49
@ -345,7 +358,8 @@ func TestStreamingHealthServices_FullSnapshot(t *testing.T) {
}
func TestStreamingHealthServices_EventBatches(t *testing.T) {
client := NewTestStreamingClient()
namespace := getNamespace("ns3")
client := NewTestStreamingClient(namespace)
typ := StreamingHealthServices{deps: MaterializerDeps{
Client: client,
Logger: hclog.Default(),
@ -358,7 +372,7 @@ func TestStreamingHealthServices_EventBatches(t *testing.T) {
newEventServiceHealthRegister(5, 3, "web"))
client.QueueEvents(
batchEv,
newEndOfSnapshotEvent(pbsubscribe.Topic_ServiceHealth, 5))
newEndOfSnapshotEvent(5))
// This contains the view state, so it's important we share it between calls.
opts := cache.FetchOptions{
@ -366,8 +380,9 @@ func TestStreamingHealthServices_EventBatches(t *testing.T) {
Timeout: 1 * time.Second,
}
req := &structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "web",
Datacenter: "dc1",
ServiceName: "web",
EnterpriseMeta: structs.EnterpriseMetaInitializer(namespace),
}
gatherNodes := func(res interface{}) []string {
@ -415,7 +430,8 @@ func TestStreamingHealthServices_EventBatches(t *testing.T) {
}
func TestStreamingHealthServices_Filtering(t *testing.T) {
client := NewTestStreamingClient()
namespace := getNamespace("ns3")
client := NewTestStreamingClient(namespace)
typ := StreamingHealthServices{deps: MaterializerDeps{
Client: client,
Logger: hclog.Default(),
@ -428,7 +444,7 @@ func TestStreamingHealthServices_Filtering(t *testing.T) {
newEventServiceHealthRegister(5, 3, "web"))
client.QueueEvents(
batchEv,
newEndOfSnapshotEvent(pbsubscribe.Topic_ServiceHealth, 5))
newEndOfSnapshotEvent(5))
// This contains the view state, so it's important we share it between calls.
opts := cache.FetchOptions{
@ -436,8 +452,9 @@ func TestStreamingHealthServices_Filtering(t *testing.T) {
Timeout: 1 * time.Second,
}
req := &structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "web",
Datacenter: "dc1",
ServiceName: "web",
EnterpriseMeta: structs.EnterpriseMetaInitializer(namespace),
QueryOptions: structs.QueryOptions{
Filter: `Node.Node == "node2"`,
},

View File

@ -2,6 +2,7 @@ package cachetype
import (
"context"
"fmt"
"google.golang.org/grpc"
@ -12,8 +13,9 @@ import (
// for queueing up custom events to a subscriber.
type TestStreamingClient struct {
pbsubscribe.StateChangeSubscription_SubscribeClient
events chan eventOrErr
ctx context.Context
events chan eventOrErr
ctx context.Context
expectedNamespace string
}
type eventOrErr struct {
@ -21,17 +23,22 @@ type eventOrErr struct {
Event *pbsubscribe.Event
}
func NewTestStreamingClient() *TestStreamingClient {
func NewTestStreamingClient(ns string) *TestStreamingClient {
return &TestStreamingClient{
events: make(chan eventOrErr, 32),
events: make(chan eventOrErr, 32),
expectedNamespace: ns,
}
}
func (t *TestStreamingClient) Subscribe(
ctx context.Context,
_ *pbsubscribe.SubscribeRequest,
req *pbsubscribe.SubscribeRequest,
_ ...grpc.CallOption,
) (pbsubscribe.StateChangeSubscription_SubscribeClient, error) {
if req.Namespace != t.expectedNamespace {
return nil, fmt.Errorf("wrong SubscribeRequest.Namespace %v, expected %v",
req.Namespace, t.expectedNamespace)
}
t.ctx = ctx
return t, nil
}

87
agent/cache/cache.go vendored
View File

@ -15,7 +15,6 @@
package cache
import (
"container/heap"
"context"
"fmt"
"io"
@ -25,13 +24,43 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"golang.org/x/time/rate"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/lib/ttlcache"
)
//go:generate mockery -all -inpkg
// TODO(kit): remove the namespace from these once the metrics themselves change
var Gauges = []prometheus.GaugeDefinition{
{
Name: []string{"consul", "cache", "entries_count"},
Help: "",
},
}
// TODO(kit): remove the namespace from these once the metrics themselves change
var Counters = []prometheus.CounterDefinition{
{
Name: []string{"consul", "cache", "bypass"},
Help: "",
},
{
Name: []string{"consul", "cache", "fetch_success"},
Help: "",
},
{
Name: []string{"consul", "cache", "fetch_error"},
Help: "",
},
{
Name: []string{"consul", "cache", "evict_expired"},
Help: "",
},
}
// Constants related to refresh backoff. We probably don't ever need to
// make these configurable knobs since they primarily exist to lower load.
const (
@ -88,7 +117,7 @@ type Cache struct {
// internal storage format so changing this should be possible safely.
entriesLock sync.RWMutex
entries map[string]cacheEntry
entriesExpiryHeap *expiryHeap
entriesExpiryHeap *ttlcache.ExpiryHeap
// stopped is used as an atomic flag to signal that the Cache has been
// discarded so background fetches and expiry processing should stop.
@ -166,16 +195,11 @@ func applyDefaultValuesOnOptions(options Options) Options {
// Further settings can be tweaked on the returned value.
func New(options Options) *Cache {
options = applyDefaultValuesOnOptions(options)
// Initialize the heap. The buffer of 1 is really important because
// it's possible for the expiry loop to trigger the heap to update
// itself and it'd block forever otherwise.
h := &expiryHeap{NotifyCh: make(chan struct{}, 1)}
heap.Init(h)
ctx, cancel := context.WithCancel(context.Background())
c := &Cache{
types: make(map[string]typeEntry),
entries: make(map[string]cacheEntry),
entriesExpiryHeap: h,
entriesExpiryHeap: ttlcache.NewExpiryHeap(),
stopCh: make(chan struct{}),
options: options,
rateLimitContext: ctx,
@ -406,8 +430,7 @@ RETRY_GET:
// Touch the expiration and fix the heap.
c.entriesLock.Lock()
entry.Expiry.Update(r.TypeEntry.Opts.LastGetTTL)
c.entriesExpiryHeap.Fix(entry.Expiry)
c.entriesExpiryHeap.Update(entry.Expiry.Index(), r.TypeEntry.Opts.LastGetTTL)
c.entriesLock.Unlock()
// We purposely do not return an error here since the cache only works with
@ -635,6 +658,7 @@ func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ign
// Error handling
if err == nil {
labels := []metrics.Label{{Name: "result_not_modified", Value: strconv.FormatBool(result.NotModified)}}
// TODO(kit): move tEntry.Name to a label on the first write here and deprecate the second write
metrics.IncrCounterWithLabels([]string{"consul", "cache", "fetch_success"}, 1, labels)
metrics.IncrCounterWithLabels([]string{"consul", "cache", tEntry.Name, "fetch_success"}, 1, labels)
@ -664,6 +688,7 @@ func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ign
newEntry.RefreshLostContact = time.Time{}
}
} else {
// TODO(kit): Add tEntry.Name to label on fetch_error and deprecate second write
metrics.IncrCounter([]string{"consul", "cache", "fetch_error"}, 1)
metrics.IncrCounter([]string{"consul", "cache", tEntry.Name, "fetch_error"}, 1)
@ -688,10 +713,8 @@ func (c *Cache) fetch(key string, r getOptions, allowNew bool, attempt uint, ign
// If this is a new entry (not in the heap yet), then set up the
// initial expiry information and insert. If we're already in
// the heap we do nothing since we're reusing the same entry.
if newEntry.Expiry == nil || newEntry.Expiry.HeapIndex == -1 {
newEntry.Expiry = &cacheEntryExpiry{Key: key}
newEntry.Expiry.Update(tEntry.Opts.LastGetTTL)
heap.Push(c.entriesExpiryHeap, newEntry.Expiry)
if newEntry.Expiry == nil || newEntry.Expiry.Index() == ttlcache.NotIndexed {
newEntry.Expiry = c.entriesExpiryHeap.Add(key, tEntry.Opts.LastGetTTL)
}
c.entries[key] = newEntry
@ -748,47 +771,30 @@ func backOffWait(failures uint) time.Duration {
// runExpiryLoop is a blocking function that watches the expiration
// heap and invalidates entries that have expired.
func (c *Cache) runExpiryLoop() {
var expiryTimer *time.Timer
for {
// If we have a previous timer, stop it.
if expiryTimer != nil {
expiryTimer.Stop()
}
// Get the entry expiring soonest
var entry *cacheEntryExpiry
var expiryCh <-chan time.Time
c.entriesLock.RLock()
if len(c.entriesExpiryHeap.Entries) > 0 {
entry = c.entriesExpiryHeap.Entries[0]
expiryTimer = time.NewTimer(time.Until(entry.Expires))
expiryCh = expiryTimer.C
}
timer := c.entriesExpiryHeap.Next()
c.entriesLock.RUnlock()
select {
case <-c.stopCh:
timer.Stop()
return
case <-c.entriesExpiryHeap.NotifyCh:
// Entries changed, so the heap may have changed. Restart loop.
timer.Stop()
continue
case <-expiryCh:
case <-timer.Wait():
c.entriesLock.Lock()
// Perform cleanup operations on the entry's state, if applicable.
state := c.entries[entry.Key].State
if closer, ok := state.(io.Closer); ok {
entry := timer.Entry
if closer, ok := c.entries[entry.Key()].State.(io.Closer); ok {
closer.Close()
}
// Entry expired! Remove it.
delete(c.entries, entry.Key)
heap.Remove(c.entriesExpiryHeap, entry.HeapIndex)
// This is subtle but important: if we race and simultaneously
// evict and fetch a new value, then we set this to -1 to
// have it treated as a new value so that the TTL is extended.
entry.HeapIndex = -1
delete(c.entries, entry.Key())
c.entriesExpiryHeap.Remove(entry.Index())
// Set some metrics
metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1)
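The rewritten loop waits on whichever fires first: the stop channel, the heap's notification channel, or the timer for the soonest expiry. Below is a minimal sketch of that select shape, using a plain time.Timer as a stand-in for ttlcache.ExpiryHeap.Next(); the function names are illustrative only.

```go
package main

import (
	"fmt"
	"time"
)

// runExpiryLoop sketches the shape of the loop above: wait on stop, on a
// "heap changed" notification, or on the timer for the next expiry.
func runExpiryLoop(stopCh, notifyCh <-chan struct{}, nextExpiry func() time.Duration, evict func()) {
	for {
		timer := time.NewTimer(nextExpiry())
		select {
		case <-stopCh:
			timer.Stop()
			return
		case <-notifyCh:
			// The soonest expiry may have changed; restart with a fresh timer.
			timer.Stop()
			continue
		case <-timer.C:
			evict()
		}
	}
}

func main() {
	stop := make(chan struct{})
	notify := make(chan struct{}, 1)
	done := make(chan struct{})

	go func() {
		runExpiryLoop(stop, notify,
			func() time.Duration { return 20 * time.Millisecond },
			func() { fmt.Println("evicted an expired entry") })
		close(done)
	}()

	time.Sleep(50 * time.Millisecond)
	close(stop)
	<-done
}
```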
@ -829,7 +835,6 @@ func (c *Cache) Prepopulate(t string, res FetchResult, dc, token, k string) erro
Index: res.Index,
FetchedAt: time.Now(),
Waiter: make(chan struct{}),
Expiry: &cacheEntryExpiry{Key: key},
FetchRateLimiter: rate.NewLimiter(
c.options.EntryFetchRate,
c.options.EntryFetchMaxBurst,

View File

@ -15,6 +15,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
"github.com/hashicorp/consul/lib/ttlcache"
"github.com/hashicorp/consul/sdk/testutil"
)
@ -1000,6 +1001,9 @@ func (t *testPartitionType) RegisterOptions() RegisterOptions {
// Test that background refreshing reports correct Age in failure and happy
// states.
func TestCacheGet_refreshAge(t *testing.T) {
if testing.Short() {
t.Skip("too slow for -short run")
}
t.Parallel()
require := require.New(t)
@ -1402,3 +1406,73 @@ OUT:
}
}
}
func TestCache_ExpiryLoop_ExitsWhenStopped(t *testing.T) {
c := &Cache{
stopCh: make(chan struct{}),
entries: make(map[string]cacheEntry),
entriesExpiryHeap: ttlcache.NewExpiryHeap(),
}
chStart := make(chan struct{})
chDone := make(chan struct{})
go func() {
close(chStart)
c.runExpiryLoop()
close(chDone)
}()
<-chStart
close(c.stopCh)
select {
case <-chDone:
case <-time.After(50 * time.Millisecond):
t.Fatalf("expected loop to exit when stopped")
}
}
func TestCache_Prepopulate(t *testing.T) {
typ := &fakeType{index: 5}
c := New(Options{})
c.RegisterType("t", typ)
c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "token", "v1")
ctx := context.Background()
req := fakeRequest{
info: RequestInfo{
Key: "v1",
Token: "token",
Datacenter: "dc1",
MinIndex: 1,
},
}
result, _, err := c.Get(ctx, "t", req)
require.NoError(t, err)
require.Equal(t, 17, result)
}
type fakeType struct {
index uint64
}
func (f fakeType) Fetch(_ FetchOptions, _ Request) (FetchResult, error) {
idx := atomic.LoadUint64(&f.index)
return FetchResult{Value: int(idx * 2), Index: idx}, nil
}
func (f fakeType) RegisterOptions() RegisterOptions {
return RegisterOptions{Refresh: true}
}
var _ Type = (*fakeType)(nil)
type fakeRequest struct {
info RequestInfo
}
func (f fakeRequest) CacheInfo() RequestInfo {
return f.info
}
var _ Request = (*fakeRequest)(nil)

122
agent/cache/entry.go vendored
View File

@ -1,10 +1,11 @@
package cache
import (
"container/heap"
"time"
"golang.org/x/time/rate"
"github.com/hashicorp/consul/lib/ttlcache"
)
// cacheEntry stores a single cache entry.
@ -31,8 +32,8 @@ type cacheEntry struct {
// Expiry contains information about the expiration of this
// entry. This is a pointer as its shared as a value in the
// expiryHeap as well.
Expiry *cacheEntryExpiry
// ExpiryHeap as well.
Expiry *ttlcache.Entry
// FetchedAt stores the time the cache entry was retrieved for determining
// its age later.
@ -46,118 +47,3 @@ type cacheEntry struct {
// FetchRateLimiter limits the rate at which fetch is called for this entry.
FetchRateLimiter *rate.Limiter
}
// cacheEntryExpiry contains the expiration information for a cache
// entry. Any modifications to this struct should be done only while
// the Cache entriesLock is held.
type cacheEntryExpiry struct {
Key string // Key in the cache map
Expires time.Time // Time when entry expires (monotonic clock)
HeapIndex int // Index in the heap
}
// Update the expiry to d time from now.
func (e *cacheEntryExpiry) Update(d time.Duration) {
e.Expires = time.Now().Add(d)
}
// expiryHeap is a heap implementation that stores information about
// when entries expire. Implements container/heap.Interface.
//
// All operations on the heap and read/write of the heap contents require
// the proper entriesLock to be held on Cache.
type expiryHeap struct {
Entries []*cacheEntryExpiry
// NotifyCh is sent a value whenever the 0 index value of the heap
// changes. This can be used to detect when the earliest value
// changes.
//
// There is a single edge case where the heap will not automatically
// send a notification: if heap.Fix is called manually and the index
// changed is 0 and the change doesn't result in any moves (stays at index
// 0), then we won't detect the change. To work around this, please
// always call the expiryHeap.Fix method instead.
NotifyCh chan struct{}
}
// Identical to heap.Fix for this heap instance but will properly handle
// the edge case where idx == 0 and no heap modification is necessary,
// and still notify the NotifyCh.
//
// This is important for cache expiry since the expiry time may have been
// extended and if we don't send a message to the NotifyCh then we'll never
// reset the timer and the entry will be evicted early.
func (h *expiryHeap) Fix(entry *cacheEntryExpiry) {
idx := entry.HeapIndex
heap.Fix(h, idx)
// This is the edge case we handle: if the prev (idx) and current (HeapIndex)
// is zero, it means the head-of-line didn't change while the value
// changed. Notify to reset our expiry worker.
if idx == 0 && entry.HeapIndex == 0 {
h.notify()
}
}
func (h *expiryHeap) Len() int { return len(h.Entries) }
func (h *expiryHeap) Swap(i, j int) {
h.Entries[i], h.Entries[j] = h.Entries[j], h.Entries[i]
h.Entries[i].HeapIndex = i
h.Entries[j].HeapIndex = j
// If we're moving the 0 index, update the channel since we need
// to re-update the timer we're waiting on for the soonest expiring
// value.
if i == 0 || j == 0 {
h.notify()
}
}
func (h *expiryHeap) Less(i, j int) bool {
// The usage of Before here is important (despite being obvious):
// this function uses the monotonic time that should be available
// on the time.Time value so the heap is immune to wall clock changes.
return h.Entries[i].Expires.Before(h.Entries[j].Expires)
}
// heap.Interface, this isn't expected to be called directly.
func (h *expiryHeap) Push(x interface{}) {
entry := x.(*cacheEntryExpiry)
// Set initial heap index, if we're going to the end then Swap
// won't be called so we need to initialize
entry.HeapIndex = len(h.Entries)
// For the first entry, we need to trigger a channel send because
// Swap won't be called; nothing to swap! We can call it right away
// because all heap operations are within a lock.
if len(h.Entries) == 0 {
h.notify()
}
h.Entries = append(h.Entries, entry)
}
// heap.Interface, this isn't expected to be called directly.
func (h *expiryHeap) Pop() interface{} {
old := h.Entries
n := len(old)
x := old[n-1]
h.Entries = old[0 : n-1]
return x
}
func (h *expiryHeap) notify() {
select {
case h.NotifyCh <- struct{}{}:
// Good
default:
// If the send would've blocked, we just ignore it. The reason this
// is safe is because NotifyCh should always be a buffered channel.
// If this blocks, it means that there is a pending message anyways
// so the receiver will restart regardless.
}
}

View File

@ -1,91 +0,0 @@
package cache
import (
"container/heap"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestExpiryHeap_impl(t *testing.T) {
var _ heap.Interface = new(expiryHeap)
}
func TestExpiryHeap(t *testing.T) {
require := require.New(t)
now := time.Now()
ch := make(chan struct{}, 10) // buffered to prevent blocking in tests
h := &expiryHeap{NotifyCh: ch}
// Init, shouldn't trigger anything
heap.Init(h)
testNoMessage(t, ch)
// Push an initial value, expect one message
entry := &cacheEntryExpiry{Key: "foo", HeapIndex: -1, Expires: now.Add(100)}
heap.Push(h, entry)
require.Equal(0, entry.HeapIndex)
testMessage(t, ch)
testNoMessage(t, ch) // exactly one asserted above
// Push another that goes earlier than entry
entry2 := &cacheEntryExpiry{Key: "bar", HeapIndex: -1, Expires: now.Add(50)}
heap.Push(h, entry2)
require.Equal(0, entry2.HeapIndex)
require.Equal(1, entry.HeapIndex)
testMessage(t, ch)
testNoMessage(t, ch) // exactly one asserted above
// Push another that goes at the end
entry3 := &cacheEntryExpiry{Key: "bar", HeapIndex: -1, Expires: now.Add(1000)}
heap.Push(h, entry3)
require.Equal(2, entry3.HeapIndex)
testNoMessage(t, ch) // no notify cause index 0 stayed the same
// Remove the first entry (not Pop, since we don't use Pop, but that works too)
remove := h.Entries[0]
heap.Remove(h, remove.HeapIndex)
require.Equal(0, entry.HeapIndex)
require.Equal(1, entry3.HeapIndex)
testMessage(t, ch)
testMessage(t, ch) // we have two because two swaps happen
testNoMessage(t, ch)
// Let's change entry 3 to be early, and fix it
entry3.Expires = now.Add(10)
h.Fix(entry3)
require.Equal(1, entry.HeapIndex)
require.Equal(0, entry3.HeapIndex)
testMessage(t, ch)
testNoMessage(t, ch)
// Let's change entry 3 again, this is an edge case where if the 0th
// element changed, we didn't trigger the channel. Our Fix func should.
entry.Expires = now.Add(20)
h.Fix(entry3)
require.Equal(1, entry.HeapIndex) // no move
require.Equal(0, entry3.HeapIndex)
testMessage(t, ch)
testNoMessage(t, ch) // one message
}
func testNoMessage(t *testing.T, ch <-chan struct{}) {
t.Helper()
select {
case <-ch:
t.Fatal("should not have a message")
default:
}
}
func testMessage(t *testing.T, ch <-chan struct{}) {
t.Helper()
select {
case <-ch:
default:
t.Fatal("should have a message")
}
}

View File

@ -5,11 +5,127 @@ import (
"net/http"
"strings"
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs"
)
var CatalogCounters = []prometheus.CounterDefinition{
{
Name: []string{"client", "api", "catalog_register"},
Help: "Increments whenever a Consul agent receives a catalog register request.",
},
{
Name: []string{"client", "rpc", "error", "catalog_register"},
Help: "Increments whenever a Consul agent receives an RPC error for a catalog register request.",
},
{
Name: []string{"client", "api", "success", "catalog_register"},
Help: "Increments whenever a Consul agent successfully responds to a catalog register request.",
},
{
Name: []string{"client", "api", "catalog_deregister"},
Help: "Increments whenever a Consul agent receives a catalog deregister request.",
},
{
Name: []string{"client", "api", "catalog_datacenters"},
Help: "Increments whenever a Consul agent receives a request to list datacenters in the catalog.",
},
{
Name: []string{"client", "rpc", "error", "catalog_deregister"},
Help: "Increments whenever a Consul agent receives an RPC error for a catalog deregister request.",
},
{
Name: []string{"client", "api", "success", "catalog_nodes"},
Help: "Increments whenever a Consul agent successfully responds to a request to list nodes.",
},
{
Name: []string{"client", "rpc", "error", "catalog_nodes"},
Help: "Increments whenever a Consul agent receives an RPC error for a request to list nodes.",
},
{
Name: []string{"client", "api", "success", "catalog_deregister"},
Help: "Increments whenever a Consul agent successfully responds to a catalog deregister request.",
},
{
Name: []string{"client", "rpc", "error", "catalog_datacenters"},
Help: "Increments whenever a Consul agent receives an RPC error for a request to list datacenters.",
},
{
Name: []string{"client", "api", "success", "catalog_datacenters"},
Help: "Increments whenever a Consul agent successfully responds to a request to list datacenters.",
},
{
Name: []string{"client", "api", "catalog_nodes"},
Help: "Increments whenever a Consul agent receives a request to list nodes from the catalog.",
},
{
Name: []string{"client", "api", "catalog_services"},
Help: "Increments whenever a Consul agent receives a request to list services from the catalog.",
},
{
Name: []string{"client", "rpc", "error", "catalog_services"},
Help: "Increments whenever a Consul agent receives an RPC error for a request to list services.",
},
{
Name: []string{"client", "api", "success", "catalog_services"},
Help: "Increments whenever a Consul agent successfully responds to a request to list services.",
},
{
Name: []string{"client", "api", "catalog_service_nodes"},
Help: "Increments whenever a Consul agent receives a request to list nodes offering a service.",
},
{
Name: []string{"client", "rpc", "error", "catalog_service_nodes"},
Help: "Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.",
},
{
Name: []string{"client", "api", "success", "catalog_service_nodes"},
Help: "Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.",
},
{
Name: []string{"client", "api", "error", "catalog_service_nodes"},
Help: "",
},
{
Name: []string{"client", "api", "catalog_node_services"},
Help: "Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.",
},
{
Name: []string{"client", "api", "success", "catalog_node_services"},
Help: "Increments whenever a Consul agent successfully responds to a request to list services in a node.",
},
{
Name: []string{"client", "rpc", "error", "catalog_node_services"},
Help: "Increments whenever a Consul agent receives an RPC error for a request to list services in a node.",
},
{
Name: []string{"client", "api", "catalog_node_service_list"},
Help: "",
},
{
Name: []string{"client", "rpc", "error", "catalog_node_service_list"},
Help: "",
},
{
Name: []string{"client", "api", "success", "catalog_node_service_list"},
Help: "",
},
{
Name: []string{"client", "api", "catalog_gateway_services"},
Help: "Increments whenever a Consul agent receives a request to list services associated with a gateway.",
},
{
Name: []string{"client", "rpc", "error", "catalog_gateway_services"},
Help: "Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.",
},
{
Name: []string{"client", "api", "success", "catalog_gateway_services"},
Help: "Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.",
},
}
func (s *HTTPHandlers) CatalogRegister(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_register"}, 1,
[]metrics.Label{{Name: "node", Value: s.nodeName()}})

View File

@ -9,6 +9,7 @@ import (
"net"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
@ -16,6 +17,7 @@ import (
"strings"
"time"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
@ -942,13 +944,15 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
DisableHostname: b.boolVal(c.Telemetry.DisableHostname),
DogstatsdAddr: b.stringVal(c.Telemetry.DogstatsdAddr),
DogstatsdTags: c.Telemetry.DogstatsdTags,
PrometheusRetentionTime: b.durationVal("prometheus_retention_time", c.Telemetry.PrometheusRetentionTime),
FilterDefault: b.boolVal(c.Telemetry.FilterDefault),
AllowedPrefixes: telemetryAllowedPrefixes,
BlockedPrefixes: telemetryBlockedPrefixes,
MetricsPrefix: b.stringVal(c.Telemetry.MetricsPrefix),
StatsdAddr: b.stringVal(c.Telemetry.StatsdAddr),
StatsiteAddr: b.stringVal(c.Telemetry.StatsiteAddr),
PrometheusOpts: prometheus.PrometheusOpts{
Expiration: b.durationVal("prometheus_retention_time", c.Telemetry.PrometheusRetentionTime),
},
},
// Agent
@ -1031,7 +1035,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
NodeID: types.NodeID(b.stringVal(c.NodeID)),
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),
NonVotingServer: b.boolVal(c.NonVotingServer),
ReadReplica: b.boolVal(c.ReadReplica),
PidFile: b.stringVal(c.PidFile),
PrimaryDatacenter: primaryDatacenter,
PrimaryGateways: b.expandAllOptionalAddrs("primary_gateways", c.PrimaryGateways),
@ -1094,7 +1098,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
Watches: c.Watches,
}
rt.CacheUseStreamingBackend = b.boolVal(c.Cache.UseStreamingBackend)
rt.UseStreamingBackend = b.boolVal(c.UseStreamingBackend)
if rt.Cache.EntryFetchMaxBurst <= 0 {
return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_max_burst must be strictly positive, was: %v", rt.Cache.EntryFetchMaxBurst)
@ -1103,6 +1107,16 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_rate must be strictly positive, was: %v", rt.Cache.EntryFetchRate)
}
if rt.UIConfig.MetricsProvider == "prometheus" {
// Handle defaulting for the built-in version of prometheus.
if len(rt.UIConfig.MetricsProxy.PathAllowlist) == 0 {
rt.UIConfig.MetricsProxy.PathAllowlist = []string{
"/api/v1/query",
"/api/v1/query_range",
}
}
}
if err := b.BuildEnterpriseRuntimeConfig(&rt, &c); err != nil {
return rt, err
}
@ -1144,6 +1158,10 @@ func (b *Builder) Validate(rt RuntimeConfig) error {
// check required params we cannot recover from first
//
if rt.RaftProtocol != 3 {
return fmt.Errorf("raft_protocol version %d is not supported by this version of Consul", rt.RaftProtocol)
}
if err := validateBasicName("datacenter", rt.Datacenter, false); err != nil {
return err
}
@ -1181,6 +1199,11 @@ func (b *Builder) Validate(rt RuntimeConfig) error {
rt.UIConfig.MetricsProxy.BaseURL)
}
}
for _, allowedPath := range rt.UIConfig.MetricsProxy.PathAllowlist {
if err := validateAbsoluteURLPath(allowedPath); err != nil {
return fmt.Errorf("ui_config.metrics_proxy.path_allowlist: %v", err)
}
}
for k, v := range rt.UIConfig.DashboardURLTemplates {
if err := validateBasicName("ui_config.dashboard_url_templates key names", k, false); err != nil {
return err
@ -1747,8 +1770,9 @@ func (b *Builder) uiMetricsProxyVal(v RawUIMetricsProxy) UIMetricsProxy {
}
return UIMetricsProxy{
BaseURL: b.stringVal(v.BaseURL),
AddHeaders: hdrs,
BaseURL: b.stringVal(v.BaseURL),
AddHeaders: hdrs,
PathAllowlist: v.PathAllowlist,
}
}
@ -2326,3 +2350,24 @@ func validateRemoteScriptsChecks(conf RuntimeConfig) error {
}
return nil
}
func validateAbsoluteURLPath(p string) error {
if !path.IsAbs(p) {
return fmt.Errorf("path %q is not an absolute path", p)
}
// Some extra validation that these are actually plain paths.
u, err := url.Parse(p)
if err != nil ||
u.Scheme != "" ||
u.Opaque != "" ||
u.User != nil ||
u.Host != "" ||
u.RawQuery != "" ||
u.Fragment != "" ||
u.Path != p {
return fmt.Errorf("path %q is not an absolute path", p)
}
return nil
}
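validateAbsoluteURLPath accepts only plain absolute paths: anything with a scheme, host, user, query, fragment, or an encoded slash is rejected with the same error message the tests above assert on. The sketch below mirrors the logic shown (it does not import the Consul package) and walks a few of the inputs from the test cases.

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// validateAbsoluteURLPath mirrors the checks shown above: the value must be an
// absolute path and must parse as nothing but a path.
func validateAbsoluteURLPath(p string) error {
	if !path.IsAbs(p) {
		return fmt.Errorf("path %q is not an absolute path", p)
	}
	u, err := url.Parse(p)
	if err != nil || u.Scheme != "" || u.Opaque != "" || u.User != nil ||
		u.Host != "" || u.RawQuery != "" || u.Fragment != "" || u.Path != p {
		return fmt.Errorf("path %q is not an absolute path", p)
	}
	return nil
}

func main() {
	for _, p := range []string{
		"/api/v1/query",   // ok
		"bar/baz",         // relative: rejected
		"/bar/baz?stu=ff", // query string: rejected
		"/bar/baz#stuff",  // fragment: rejected
		"/bar%2fbaz",      // encoded slash: rejected (decoded Path != p)
	} {
		fmt.Println(p, "->", validateAbsoluteURLPath(p))
	}
}
```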

View File

@ -13,6 +13,9 @@ var (
"non_voting_server": func(c *Config) {
// to maintain existing compatibility we don't nullify the value
},
"read_replica": func(c *Config) {
// to maintain existing compatibility we don't nullify the value
},
"segment": func(c *Config) {
// to maintain existing compatibility we don't nullify the value
},

View File

@ -24,11 +24,18 @@ func TestBuilder_validateEnterpriseConfigKeys(t *testing.T) {
cases := map[string]testCase{
"non_voting_server": {
config: Config{
NonVotingServer: &boolVal,
ReadReplica: &boolVal,
},
keys: []string{"non_voting_server"},
badKeys: []string{"non_voting_server"},
},
"read_replica": {
config: Config{
ReadReplica: &boolVal,
},
keys: []string{"read_replica"},
badKeys: []string{"read_replica"},
},
"segment": {
config: Config{
SegmentName: &stringVal,
@ -118,11 +125,11 @@ func TestBuilder_validateEnterpriseConfigKeys(t *testing.T) {
},
"multi": {
config: Config{
NonVotingServer: &boolVal,
SegmentName: &stringVal,
ReadReplica: &boolVal,
SegmentName: &stringVal,
},
keys: []string{"non_voting_server", "segment", "acl.tokens.agent_master"},
badKeys: []string{"non_voting_server", "segment"},
keys: []string{"non_voting_server", "read_replica", "segment", "acl.tokens.agent_master"},
badKeys: []string{"non_voting_server", "read_replica", "segment"},
},
}

View File

@ -103,9 +103,6 @@ type Cache struct {
EntryFetchMaxBurst *int `json:"entry_fetch_max_burst,omitempty" hcl:"entry_fetch_max_burst" mapstructure:"entry_fetch_max_burst"`
// EntryFetchRate represents the max calls/sec for a single cache entry
EntryFetchRate *float64 `json:"entry_fetch_rate,omitempty" hcl:"entry_fetch_rate" mapstructure:"entry_fetch_rate"`
// UseStreamingBackend instead of blocking queries to populate the cache.
// Only supported by some cache types.
UseStreamingBackend *bool `json:"use_streaming_backend" hcl:"use_streaming_backend" mapstructure:"use_streaming_backend"`
}
// Config defines the format of a configuration file in either JSON or
@ -264,6 +261,10 @@ type Config struct {
RPC RPC `mapstructure:"rpc"`
// UseStreamingBackend instead of blocking queries for service health and
// any other endpoints which support streaming.
UseStreamingBackend *bool `json:"use_streaming_backend" hcl:"use_streaming_backend" mapstructure:"use_streaming_backend"`
// This isn't used by Consul but we've documented a feature where users
// can deploy their snapshot agent configs alongside their Consul configs
// so we have a placeholder here so it can be parsed but this doesn't
@ -288,7 +289,7 @@ type Config struct {
// Enterprise Only
Audit *Audit `json:"audit,omitempty" hcl:"audit" mapstructure:"audit"`
// Enterprise Only
NonVotingServer *bool `json:"non_voting_server,omitempty" hcl:"non_voting_server" mapstructure:"non_voting_server"`
ReadReplica *bool `json:"read_replica,omitempty" hcl:"read_replica" mapstructure:"read_replica" alias:"non_voting_server"`
// Enterprise Only
SegmentName *string `json:"segment,omitempty" hcl:"segment" mapstructure:"segment"`
// Enterprise Only
@ -797,8 +798,9 @@ type RawUIConfig struct {
}
type RawUIMetricsProxy struct {
BaseURL *string `json:"base_url,omitempty" hcl:"base_url" mapstructure:"base_url"`
AddHeaders []RawUIMetricsProxyAddHeader `json:"add_headers,omitempty" hcl:"add_headers" mapstructure:"add_headers"`
BaseURL *string `json:"base_url,omitempty" hcl:"base_url" mapstructure:"base_url"`
AddHeaders []RawUIMetricsProxyAddHeader `json:"add_headers,omitempty" hcl:"add_headers" mapstructure:"add_headers"`
PathAllowlist []string `json:"path_allowlist,omitempty" hcl:"path_allowlist" mapstructure:"path_allowlist"`
}
type RawUIMetricsProxyAddHeader struct {

View File

@ -119,6 +119,7 @@ func DefaultSource() Source {
expose_min_port = 21500
expose_max_port = 21755
}
raft_protocol = 3
telemetry = {
metrics_prefix = "consul"
filter_default = true

View File

@ -90,7 +90,8 @@ func AddFlags(fs *flag.FlagSet, f *BuilderOpts) {
add(&f.Config.NodeName, "node", "Name of this node. Must be unique in the cluster.")
add(&f.Config.NodeID, "node-id", "A unique ID for this node across space and time. Defaults to a randomly-generated ID that persists in the data-dir.")
add(&f.Config.NodeMeta, "node-meta", "An arbitrary metadata key/value pair for this node, of the format `key:value`. Can be specified multiple times.")
add(&f.Config.NonVotingServer, "non-voting-server", "(Enterprise-only) This flag is used to make the server not participate in the Raft quorum, and have it only receive the data replication stream. This can be used to add read scalability to a cluster in cases where a high volume of reads to servers are needed.")
add(&f.Config.ReadReplica, "non-voting-server", "(Enterprise-only) DEPRECATED: -read-replica should be used instead")
add(&f.Config.ReadReplica, "read-replica", "(Enterprise-only) This flag is used to make the server not participate in the Raft quorum, and have it only receive the data replication stream. This can be used to add read scalability to a cluster in cases where a high volume of reads to servers are needed.")
add(&f.Config.PidFile, "pid-file", "Path to file to store agent PID.")
add(&f.Config.RPCProtocol, "protocol", "Sets the protocol version. Defaults to latest.")
add(&f.Config.RaftProtocol, "raft-protocol", "Sets the Raft protocol version. Defaults to latest.")

View File

@ -77,8 +77,8 @@ type RuntimeConfig struct {
// ACLDefaultPolicy is used to control the ACL interaction when
// there is no defined policy. This can be "allow" which means
// ACLs are used to black-list, or "deny" which means ACLs are
// white-lists.
// ACLs are used to deny-list, or "deny" which means ACLs are
// allow-lists.
//
// hcl: acl.default_policy = ("allow"|"deny")
ACLDefaultPolicy string
@ -853,12 +853,12 @@ type RuntimeConfig struct {
// flag: -node-meta "key:value" -node-meta "key:value" ...
NodeMeta map[string]string
// NonVotingServer is whether this server will act as a non-voting member
// ReadReplica is whether this server will act as a non-voting member
// of the cluster to help provide read scalability. (Enterprise-only)
//
// hcl: read_replica = (true|false) (alias: non_voting_server)
// flag: -read-replica (deprecated alias: -non-voting-server)
NonVotingServer bool
ReadReplica bool
// PidFile is the file to store our PID in.
//
@ -945,7 +945,9 @@ type RuntimeConfig struct {
RPCConfig consul.RPCConfig
CacheUseStreamingBackend bool
// UseStreamingBackend enables streaming as a replacement for agent/cache
// in the client agent for endpoints which support streaming.
UseStreamingBackend bool
// RaftProtocol sets the Raft protocol version to use on this server.
// Defaults to 3.
@ -1546,8 +1548,9 @@ type UIConfig struct {
}
type UIMetricsProxy struct {
BaseURL string
AddHeaders []UIMetricsProxyAddHeader
BaseURL string
AddHeaders []UIMetricsProxyAddHeader
PathAllowlist []string
}
type UIMetricsProxyAddHeader struct {
@ -1848,6 +1851,21 @@ func sanitize(name string, v reflect.Value) reflect.Value {
case isArray(typ) || isSlice(typ):
ma := make([]interface{}, 0, v.Len())
if name == "AddHeaders" {
// must be UIConfig.MetricsProxy.AddHeaders
for i := 0; i < v.Len(); i++ {
addr := v.Index(i).Addr()
hdr := addr.Interface().(*UIMetricsProxyAddHeader)
hm := map[string]interface{}{
"Name": hdr.Name,
"Value": "hidden",
}
ma = append(ma, hm)
}
return reflect.ValueOf(ma)
}
if strings.HasPrefix(name, "SerfAllowedCIDRs") {
for i := 0; i < v.Len(); i++ {
addr := v.Index(i).Addr()

View File

@ -12,12 +12,16 @@ var entTokenConfigSanitize = `"EnterpriseConfig": {},`
func entFullRuntimeConfig(rt *RuntimeConfig) {}
var enterpriseNonVotingServerWarnings []string = []string{enterpriseConfigKeyError{key: "non_voting_server"}.Error()}
var enterpriseReadReplicaWarnings []string = []string{enterpriseConfigKeyError{key: "read_replica"}.Error()}
var enterpriseConfigKeyWarnings []string
func init() {
for k := range enterpriseConfigMap {
if k == "non_voting_server" {
// this is an alias for "read_replica" so we shouldn't see it in warnings
continue
}
enterpriseConfigKeyWarnings = append(enterpriseConfigKeyWarnings, enterpriseConfigKeyError{key: k}.Error())
}
}

View File

@ -18,6 +18,7 @@ import (
"testing"
"time"
"github.com/armon/go-metrics/prometheus"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/cache"
@ -620,10 +621,10 @@ func TestBuilder_BuildAndValidate_ConfigFlagsAndEdgecases(t *testing.T) {
`-data-dir=` + dataDir,
},
patch: func(rt *RuntimeConfig) {
rt.NonVotingServer = true
rt.ReadReplica = true
rt.DataDir = dataDir
},
warns: enterpriseNonVotingServerWarnings,
warns: enterpriseReadReplicaWarnings,
},
{
desc: "-pid-file",
@ -673,14 +674,22 @@ func TestBuilder_BuildAndValidate_ConfigFlagsAndEdgecases(t *testing.T) {
{
desc: "-raft-protocol",
args: []string{
`-raft-protocol=1`,
`-raft-protocol=3`,
`-data-dir=` + dataDir,
},
patch: func(rt *RuntimeConfig) {
rt.RaftProtocol = 1
rt.RaftProtocol = 3
rt.DataDir = dataDir
},
},
{
desc: "-raft-protocol unsupported",
args: []string{
`-raft-protocol=2`,
`-data-dir=` + dataDir,
},
err: "raft_protocol version 2 is not supported by this version of Consul",
},
{
desc: "-recursor",
args: []string{
@ -4556,6 +4565,189 @@ func TestBuilder_BuildAndValidate_ConfigFlagsAndEdgecases(t *testing.T) {
`},
err: `ui_config.metrics_proxy.base_url must be a valid http or https URL.`,
},
{
desc: "metrics_proxy.path_allowlist invalid (empty)",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["", "/foo"]
}
}
`},
err: `ui_config.metrics_proxy.path_allowlist: path "" is not an absolute path`,
},
{
desc: "metrics_proxy.path_allowlist invalid (relative)",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["bar/baz", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["bar/baz", "/foo"]
}
}
`},
err: `ui_config.metrics_proxy.path_allowlist: path "bar/baz" is not an absolute path`,
},
{
desc: "metrics_proxy.path_allowlist invalid (weird)",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["://bar/baz", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["://bar/baz", "/foo"]
}
}
`},
err: `ui_config.metrics_proxy.path_allowlist: path "://bar/baz" is not an absolute path`,
},
{
desc: "metrics_proxy.path_allowlist invalid (fragment)",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["/bar/baz#stuff", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["/bar/baz#stuff", "/foo"]
}
}
`},
err: `ui_config.metrics_proxy.path_allowlist: path "/bar/baz#stuff" is not an absolute path`,
},
{
desc: "metrics_proxy.path_allowlist invalid (querystring)",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["/bar/baz?stu=ff", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["/bar/baz?stu=ff", "/foo"]
}
}
`},
err: `ui_config.metrics_proxy.path_allowlist: path "/bar/baz?stu=ff" is not an absolute path`,
},
{
desc: "metrics_proxy.path_allowlist invalid (encoded slash)",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["/bar%2fbaz", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["/bar%2fbaz", "/foo"]
}
}
`},
err: `ui_config.metrics_proxy.path_allowlist: path "/bar%2fbaz" is not an absolute path`,
},
{
desc: "metrics_proxy.path_allowlist ok",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_proxy": {
"path_allowlist": ["/bar/baz", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_proxy {
path_allowlist = ["/bar/baz", "/foo"]
}
}
`},
patch: func(rt *RuntimeConfig) {
rt.UIConfig.MetricsProxy.PathAllowlist = []string{"/bar/baz", "/foo"}
rt.DataDir = dataDir
},
},
{
desc: "metrics_proxy.path_allowlist defaulted for prometheus",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_provider": "prometheus"
}
}`},
hcl: []string{`
ui_config {
metrics_provider = "prometheus"
}
`},
patch: func(rt *RuntimeConfig) {
rt.UIConfig.MetricsProvider = "prometheus"
rt.UIConfig.MetricsProxy.PathAllowlist = []string{
"/api/v1/query",
"/api/v1/query_range",
}
rt.DataDir = dataDir
},
},
{
desc: "metrics_proxy.path_allowlist not overridden with defaults for prometheus",
args: []string{`-data-dir=` + dataDir},
json: []string{`{
"ui_config": {
"metrics_provider": "prometheus",
"metrics_proxy": {
"path_allowlist": ["/bar/baz", "/foo"]
}
}
}`},
hcl: []string{`
ui_config {
metrics_provider = "prometheus"
metrics_proxy {
path_allowlist = ["/bar/baz", "/foo"]
}
}
`},
patch: func(rt *RuntimeConfig) {
rt.UIConfig.MetricsProvider = "prometheus"
rt.UIConfig.MetricsProxy.PathAllowlist = []string{"/bar/baz", "/foo"}
rt.DataDir = dataDir
},
},
{
desc: "metrics_proxy.base_url http(s)",
args: []string{`-data-dir=` + dataDir},
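The allowlist cases above pin down the expected behaviour: every entry must be a plain absolute URL path, with no relative segments, scheme, query string, fragment, or percent-encoded slash. As a rough, standalone sketch consistent with those cases (the `validatePathAllowlist` helper is a hypothetical name for illustration, not the builder code under test):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// validatePathAllowlist mirrors the test cases above: each entry must parse
// as a bare absolute path with nothing else attached to it.
func validatePathAllowlist(paths []string) error {
	for _, p := range paths {
		u, err := url.Parse(p)
		// Reject entries that fail to parse, do not start with "/", or carry
		// anything beyond the path itself (query, fragment, scheme/host, or a
		// percent-encoded slash, which url.Parse would silently decode so
		// u.Path would no longer equal the raw input).
		if err != nil || !strings.HasPrefix(p, "/") || u.Path != p {
			return fmt.Errorf("ui_config.metrics_proxy.path_allowlist: path %q is not an absolute path", p)
		}
	}
	return nil
}

func main() {
	fmt.Println(validatePathAllowlist([]string{"/bar/baz", "/foo"})) // <nil>
	fmt.Println(validatePathAllowlist([]string{"/bar/baz?stu=ff"}))  // error
	fmt.Println(validatePathAllowlist([]string{"bar/baz", "/foo"}))  // error
}
```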
@ -4906,9 +5098,9 @@ func TestFullConfig(t *testing.T) {
"bootstrap_expect": 53,
"cache": {
"entry_fetch_max_burst": 42,
"entry_fetch_rate": 0.334,
"use_streaming_backend": true
"entry_fetch_rate": 0.334
},
"use_streaming_backend": true,
"ca_file": "erA7T0PM",
"ca_path": "mQEN1Mfp",
"cert_file": "7s4QAzDk",
@ -5131,10 +5323,11 @@ func TestFullConfig(t *testing.T) {
"primary_datacenter": "ejtmd43d",
"primary_gateways": [ "aej8eeZo", "roh2KahS" ],
"primary_gateways_interval": "18866s",
"raft_protocol": 19016,
"raft_protocol": 3,
"raft_snapshot_threshold": 16384,
"raft_snapshot_interval": "30s",
"raft_trailing_logs": 83749,
"read_replica": true,
"reconnect_timeout": "23739s",
"reconnect_timeout_wan": "26694s",
"recursors": [ "63.38.39.58", "92.49.18.18" ],
@ -5471,7 +5664,8 @@ func TestFullConfig(t *testing.T) {
"name": "p3nynwc9",
"value": "TYBgnN2F"
}
]
],
"path_allowlist": ["/aSh3cu", "/eiK/2Th"]
},
"dashboard_url_templates": {
"u2eziu2n_lower_case": "http://lkjasd.otr"
@ -5593,8 +5787,8 @@ func TestFullConfig(t *testing.T) {
cache = {
entry_fetch_max_burst = 42
entry_fetch_rate = 0.334
use_streaming_backend = true
},
use_streaming_backend = true
ca_file = "erA7T0PM"
ca_path = "mQEN1Mfp"
cert_file = "7s4QAzDk"
@ -5820,10 +6014,11 @@ func TestFullConfig(t *testing.T) {
primary_datacenter = "ejtmd43d"
primary_gateways = [ "aej8eeZo", "roh2KahS" ]
primary_gateways_interval = "18866s"
raft_protocol = 19016
raft_protocol = 3
raft_snapshot_threshold = 16384
raft_snapshot_interval = "30s"
raft_trailing_logs = 83749
read_replica = true
reconnect_timeout = "23739s"
reconnect_timeout_wan = "26694s"
recursors = [ "63.38.39.58", "92.49.18.18" ]
@ -6162,6 +6357,7 @@ func TestFullConfig(t *testing.T) {
value = "TYBgnN2F"
}
]
path_allowlist = ["/aSh3cu", "/eiK/2Th"]
}
dashboard_url_templates {
u2eziu2n_lower_case = "http://lkjasd.otr"
@ -6569,7 +6765,7 @@ func TestFullConfig(t *testing.T) {
NodeID: types.NodeID("AsUIlw99"),
NodeMeta: map[string]string{"5mgGQMBk": "mJLtVMSG", "A7ynFMJB": "0Nx6RGab"},
NodeName: "otlLxGaI",
NonVotingServer: true,
ReadReplica: true,
PidFile: "43xN80Km",
PrimaryDatacenter: "ejtmd43d",
PrimaryGateways: []string{"aej8eeZo", "roh2KahS"},
@ -6582,7 +6778,7 @@ func TestFullConfig(t *testing.T) {
RPCRateLimit: 12029.43,
RPCMaxBurst: 44848,
RPCMaxConnsPerClient: 2954,
RaftProtocol: 19016,
RaftProtocol: 3,
RaftSnapshotThreshold: 16384,
RaftSnapshotInterval: 30 * time.Second,
RaftTrailingLogs: 83749,
@ -6891,17 +7087,17 @@ func TestFullConfig(t *testing.T) {
},
},
},
CacheUseStreamingBackend: true,
SerfAdvertiseAddrLAN: tcpAddr("17.99.29.16:8301"),
SerfAdvertiseAddrWAN: tcpAddr("78.63.37.19:8302"),
SerfBindAddrLAN: tcpAddr("99.43.63.15:8301"),
SerfBindAddrWAN: tcpAddr("67.88.33.19:8302"),
SerfAllowedCIDRsLAN: []net.IPNet{},
SerfAllowedCIDRsWAN: []net.IPNet{},
SessionTTLMin: 26627 * time.Second,
SkipLeaveOnInt: true,
StartJoinAddrsLAN: []string{"LR3hGDoG", "MwVpZ4Up"},
StartJoinAddrsWAN: []string{"EbFSc3nA", "kwXTh623"},
UseStreamingBackend: true,
SerfAdvertiseAddrLAN: tcpAddr("17.99.29.16:8301"),
SerfAdvertiseAddrWAN: tcpAddr("78.63.37.19:8302"),
SerfBindAddrLAN: tcpAddr("99.43.63.15:8301"),
SerfBindAddrWAN: tcpAddr("67.88.33.19:8302"),
SerfAllowedCIDRsLAN: []net.IPNet{},
SerfAllowedCIDRsWAN: []net.IPNet{},
SessionTTLMin: 26627 * time.Second,
SkipLeaveOnInt: true,
StartJoinAddrsLAN: []string{"LR3hGDoG", "MwVpZ4Up"},
StartJoinAddrsWAN: []string{"EbFSc3nA", "kwXTh623"},
Telemetry: lib.TelemetryConfig{
CirconusAPIApp: "p4QOTe9j",
CirconusAPIToken: "E3j35V23",
@ -6924,9 +7120,11 @@ func TestFullConfig(t *testing.T) {
AllowedPrefixes: []string{"oJotS8XJ"},
BlockedPrefixes: []string{"cazlEhGn"},
MetricsPrefix: "ftO6DySn",
PrometheusRetentionTime: 15 * time.Second,
StatsdAddr: "drce87cy",
StatsiteAddr: "HpFwKB8R",
PrometheusOpts: prometheus.PrometheusOpts{
Expiration: 15 * time.Second,
},
},
TLSCipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256},
TLSMinVersion: "pAOWafkR",
@ -6956,6 +7154,7 @@ func TestFullConfig(t *testing.T) {
Value: "TYBgnN2F",
},
},
PathAllowlist: []string{"/aSh3cu", "/eiK/2Th"},
},
DashboardURLTemplates: map[string]string{"u2eziu2n_lower_case": "http://lkjasd.otr"},
},
@ -7262,6 +7461,7 @@ func TestSanitize(t *testing.T) {
EntryFetchRate: 0.334,
},
ConsulCoordinateUpdatePeriod: 15 * time.Second,
RaftProtocol: 3,
RetryJoinLAN: []string{
"foo=bar key=baz secret=boom bang=bar",
},
@ -7298,6 +7498,13 @@ func TestSanitize(t *testing.T) {
*parseCIDR(t, "127.0.0.0/8"),
},
TxnMaxReqLen: 5678000000000000,
UIConfig: UIConfig{
MetricsProxy: UIMetricsProxy{
AddHeaders: []UIMetricsProxyAddHeader{
{Name: "foo", Value: "secret"},
},
},
},
}
rtJSON := `{
@ -7493,13 +7700,13 @@ func TestSanitize(t *testing.T) {
"NodeID": "",
"NodeMeta": {},
"NodeName": "",
"NonVotingServer": false,
"PidFile": "",
"PrimaryDatacenter": "",
"PrimaryGateways": [
"pmgw_foo=bar pmgw_key=baz pmgw_secret=boom pmgw_bang=bar"
],
"PrimaryGatewaysInterval": "0s",
"ReadReplica": false,
"RPCAdvertiseAddr": "",
"RPCBindAddr": "",
"RPCHandshakeTimeout": "0s",
@ -7511,7 +7718,7 @@ func TestSanitize(t *testing.T) {
"RPCConfig": {
"EnableStreaming": false
},
"RaftProtocol": 0,
"RaftProtocol": 3,
"RaftSnapshotInterval": "0s",
"RaftSnapshotThreshold": 0,
"RaftTrailingLogs": 0,
@ -7541,7 +7748,7 @@ func TestSanitize(t *testing.T) {
"SerfBindAddrWAN": "",
"SerfPortLAN": 0,
"SerfPortWAN": 0,
"CacheUseStreamingBackend": false,
"UseStreamingBackend": false,
"ServerMode": false,
"ServerName": "",
"ServerPort": 0,
@ -7626,9 +7833,15 @@ func TestSanitize(t *testing.T) {
"DogstatsdTags": [],
"FilterDefault": false,
"MetricsPrefix": "",
"PrometheusRetentionTime": "0s",
"StatsdAddr": "",
"StatsiteAddr": ""
"StatsiteAddr": "",
"PrometheusOpts": {
"Expiration": "0s",
"Registerer": null,
"GaugeDefinitions": [],
"CounterDefinitions": [],
"SummaryDefinitions": []
}
},
"TranslateWANAddrs": false,
"TxnMaxReqLen": 5678000000000000,
@ -7640,8 +7853,14 @@ func TestSanitize(t *testing.T) {
"MetricsProviderFiles": [],
"MetricsProviderOptionsJSON": "",
"MetricsProxy": {
"AddHeaders": [],
"BaseURL": ""
"AddHeaders": [
{
"Name": "foo",
"Value": "hidden"
}
],
"BaseURL": "",
"PathAllowlist": []
},
"DashboardURLTemplates": {}
},

View File

@ -6,10 +6,11 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/require"
)
type consulCAMockDelegate struct {
@ -48,10 +49,7 @@ func (c *consulCAMockDelegate) ApplyCARequest(req *structs.CARequest) (interface
}
func newMockDelegate(t *testing.T, conf *structs.CAConfiguration) *consulCAMockDelegate {
s, err := state.NewStateStore(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
s := state.NewStateStore(nil)
if s == nil {
t.Fatalf("missing state store")
}

View File

@ -79,7 +79,7 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {
v.spiffeID = connect.SpiffeIDSigningForCluster(&structs.CAConfiguration{ClusterID: v.clusterID})
// Look up the token to see if we can auto-renew its lease.
secret, err := client.Auth().Token().Lookup(config.Token)
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
return err
}
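Switching from `Lookup(config.Token)` to `LookupSelf()` means the provider inspects the token it is already authenticated with instead of re-submitting the token value to the lookup endpoint. A rough sketch of how such a self-lookup result can be read back with the Vault API client is below; the `describeToken` helper and the field handling are illustrative, not the provider's exact logic.

```go
package main

import (
	"fmt"

	vaultapi "github.com/hashicorp/vault/api"
)

// describeToken performs a self-lookup of the client's own token and reads
// the renewability and TTL that an auto-renew decision would depend on.
func describeToken(client *vaultapi.Client) error {
	secret, err := client.Auth().Token().LookupSelf()
	if err != nil {
		return err
	}
	renewable, err := secret.TokenIsRenewable()
	if err != nil {
		return err
	}
	ttl, err := secret.TokenTTL()
	if err != nil {
		return err
	}
	fmt.Printf("token renewable=%v ttl=%s\n", renewable, ttl)
	return nil
}

func main() {
	// DefaultConfig picks up VAULT_ADDR/VAULT_TOKEN from the environment.
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		panic(err)
	}
	if err := describeToken(client); err != nil {
		panic(err)
	}
}
```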

View File

@ -6,7 +6,8 @@ import (
"sync"
"time"
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
@ -15,6 +16,32 @@ import (
"golang.org/x/time/rate"
)
var ACLCounters = []prometheus.CounterDefinition{
{
Name: []string{"acl", "token", "cache_hit"},
Help: "",
},
{
Name: []string{"acl", "token", "cache_miss"},
Help: "",
},
}
var ACLSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"acl", "resolveTokenLegacy"},
Help: "",
},
{
Name: []string{"acl", "ResolveToken"},
Help: "",
},
{
Name: []string{"acl", "ResolveTokenToIdentity"},
Help: "",
},
}
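Definition slices like `ACLCounters` and `ACLSummaries` exist so metric keys can be pre-registered with the Prometheus sink rather than appearing only after their first emission. A minimal sketch of that wiring, using the go-metrics `PrometheusOpts` fields that also appear in the config tests earlier in this diff, is shown below; the inline definitions, help strings, and `main` wrapper are illustrative, not Consul's actual telemetry bootstrap.

```go
package main

import (
	"fmt"

	metrics "github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
)

func main() {
	// Keys supplied as definitions are registered up front with a zero value
	// and are kept by the sink even while they are not being updated.
	sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
		CounterDefinitions: []prometheus.CounterDefinition{
			{Name: []string{"acl", "token", "cache_hit"}, Help: "ACL token cache hits."},
			{Name: []string{"acl", "token", "cache_miss"}, Help: "ACL token cache misses."},
		},
		SummaryDefinitions: []prometheus.SummaryDefinition{
			{Name: []string{"acl", "ResolveToken"}, Help: "Time spent resolving an ACL token."},
		},
	})
	if err != nil {
		panic(err)
	}
	// Install the sink as the global go-metrics destination.
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("consul"), sink); err != nil {
		panic(err)
	}
	fmt.Println("prometheus sink registered")
}
```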
// These must be kept in sync with the constants in command/agent/acl.go.
const (
// anonymousToken is the token ID we re-write to if there is no token ID

View File

@ -11,7 +11,8 @@ import (
"regexp"
"time"
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/authmethod"
"github.com/hashicorp/consul/agent/consul/state"
@ -30,6 +31,73 @@ const (
aclBootstrapReset = "acl-bootstrap-reset"
)
var ACLEndpointSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"acl", "token", "clone"},
Help: "",
},
{
Name: []string{"acl", "token", "upsert"},
Help: "",
},
{
Name: []string{"acl", "token", "delete"},
Help: "",
},
{
Name: []string{"acl", "policy", "upsert"},
Help: "",
},
{
Name: []string{"acl", "policy", "delete"},
Help: "",
},
{
Name: []string{"acl", "policy", "delete"},
Help: "",
},
{
Name: []string{"acl", "role", "upsert"},
Help: "",
},
{
Name: []string{"acl", "role", "delete"},
Help: "",
},
{
Name: []string{"acl", "bindingrule", "upsert"},
Help: "",
},
{
Name: []string{"acl", "bindingrule", "delete"},
Help: "",
},
{
Name: []string{"acl", "authmethod", "upsert"},
Help: "",
},
{
Name: []string{"acl", "authmethod", "delete"},
Help: "",
},
{
Name: []string{"acl", "login"},
Help: "",
},
{
Name: []string{"acl", "login"},
Help: "",
},
{
Name: []string{"acl", "logout"},
Help: "",
},
{
Name: []string{"acl", "logout"},
Help: "",
},
}
// Regex for matching
var (
validPolicyName = regexp.MustCompile(`^[A-Za-z0-9\-_]{1,128}$`)

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
@ -12,6 +13,13 @@ import (
"github.com/hashicorp/go-memdb"
)
var ACLEndpointLegacySummaries = []prometheus.SummaryDefinition{
{
Name: []string{"acl", "apply"},
Help: "Measures the time it takes to complete an update to the ACL store.",
},
}
// Bootstrap is used to perform a one-time ACL bootstrap operation on
// a cluster to get the first management token.
func (a *ACL) Bootstrap(args *structs.DCSpecificRequest, reply *structs.ACL) error {

View File

@ -449,9 +449,9 @@ func TestACLEndpoint_ReplicationStatus(t *testing.T) {
err := msgpackrpc.CallWithCodec(codec, "ACL.ReplicationStatus", &getR, &status)
require.NoError(t, err)
require.True(t, status.Enabled)
require.True(t, status.Running)
require.Equal(t, "dc2", status.SourceDatacenter)
require.True(r, status.Enabled)
require.True(r, status.Running)
require.Equal(r, "dc2", status.SourceDatacenter)
})
}

View File

@ -1264,7 +1264,7 @@ func TestACLResolver_DownPolicy(t *testing.T) {
legacy: false,
localTokens: false,
localPolicies: false,
tokenReadFn: func(args *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
tokenReadFn: func(_ *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
if !tokenResolved {
reply.Token = foundToken
tokenResolved = true
@ -1330,7 +1330,7 @@ func TestACLResolver_DownPolicy(t *testing.T) {
legacy: false,
localTokens: false,
localPolicies: false,
tokenReadFn: func(args *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
tokenReadFn: func(_ *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
// no limit
reply.Token = foundToken
return nil
@ -1440,7 +1440,7 @@ func TestACLResolver_Client(t *testing.T) {
legacy: false,
localTokens: false,
localPolicies: false,
tokenReadFn: func(args *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
tokenReadFn: func(_ *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
atomic.AddInt32(&tokenReads, 1)
if deleted {
return acl.ErrNotFound
@ -1558,7 +1558,7 @@ func TestACLResolver_Client(t *testing.T) {
legacy: true,
localTokens: false,
localPolicies: false,
getPolicyFn: func(args *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
getPolicyFn: func(_ *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
reply.Parent = "deny"
reply.TTL = 30
reply.ETag = "nothing"
@ -1900,7 +1900,7 @@ func TestACLResolver_Legacy(t *testing.T) {
legacy: true,
localTokens: false,
localPolicies: false,
getPolicyFn: func(args *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
getPolicyFn: func(_ *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
if !cached {
reply.Parent = "deny"
reply.TTL = 30
@ -1951,7 +1951,7 @@ func TestACLResolver_Legacy(t *testing.T) {
legacy: true,
localTokens: false,
localPolicies: false,
getPolicyFn: func(args *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
getPolicyFn: func(_ *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
if !cached {
reply.Parent = "deny"
reply.TTL = 0
@ -2004,7 +2004,7 @@ func TestACLResolver_Legacy(t *testing.T) {
legacy: true,
localTokens: false,
localPolicies: false,
getPolicyFn: func(args *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
getPolicyFn: func(_ *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
if !cached {
reply.Parent = "deny"
reply.TTL = 0
@ -2058,7 +2058,7 @@ func TestACLResolver_Legacy(t *testing.T) {
legacy: true,
localTokens: false,
localPolicies: false,
getPolicyFn: func(args *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
getPolicyFn: func(_ *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
if !cached {
reply.Parent = "deny"
reply.TTL = 0
@ -2112,7 +2112,7 @@ func TestACLResolver_Legacy(t *testing.T) {
legacy: true,
localTokens: false,
localPolicies: false,
getPolicyFn: func(args *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
getPolicyFn: func(_ *structs.ACLPolicyResolveLegacyRequest, reply *structs.ACLPolicyResolveLegacyResponse) error {
if !cached {
reply.Parent = "deny"
reply.TTL = 0

View File

@ -280,7 +280,7 @@ func TestAutoConfigInitialConfiguration(t *testing.T) {
},
},
},
patchResponse: func(t *testing.T, srv *Server, resp *pbautoconf.AutoConfigResponse) {
patchResponse: func(t *testing.T, _ *Server, resp *pbautoconf.AutoConfigResponse) {
// we are expecting an ACL token but cannot check anything for equality
// so here we check that it was set and overwrite it
require.NotNil(t, resp.Config)

View File

@ -3,60 +3,49 @@ package consul
import (
"context"
"fmt"
"net"
"strconv"
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/agent/consul/autopilot"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/raft"
autopilot "github.com/hashicorp/raft-autopilot"
"github.com/hashicorp/serf/serf"
)
var AutopilotGauges = []prometheus.GaugeDefinition{
{
Name: []string{"autopilot", "failure_tolerance"},
Help: "Tracks the number of voting servers that the cluster can lose while continuing to function.",
},
{
Name: []string{"autopilot", "healthy"},
Help: "Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy.",
},
}
// AutopilotDelegate is a Consul delegate for autopilot operations.
type AutopilotDelegate struct {
server *Server
}
func (d *AutopilotDelegate) AutopilotConfig() *autopilot.Config {
return d.server.getOrCreateAutopilotConfig()
return d.server.getOrCreateAutopilotConfig().ToAutopilotLibraryConfig()
}
func (d *AutopilotDelegate) FetchStats(ctx context.Context, servers []serf.Member) map[string]*autopilot.ServerStats {
func (d *AutopilotDelegate) KnownServers() map[raft.ServerID]*autopilot.Server {
return d.server.autopilotServers()
}
func (d *AutopilotDelegate) FetchServerStats(ctx context.Context, servers map[raft.ServerID]*autopilot.Server) map[raft.ServerID]*autopilot.ServerStats {
return d.server.statsFetcher.Fetch(ctx, servers)
}
func (d *AutopilotDelegate) IsServer(m serf.Member) (*autopilot.ServerInfo, error) {
if m.Tags["role"] != "consul" {
return nil, nil
}
portStr := m.Tags["port"]
port, err := strconv.Atoi(portStr)
if err != nil {
return nil, err
}
buildVersion, err := metadata.Build(&m)
if err != nil {
return nil, err
}
server := &autopilot.ServerInfo{
Name: m.Name,
ID: m.Tags["id"],
Addr: &net.TCPAddr{IP: m.Addr, Port: port},
Build: *buildVersion,
Status: m.Status,
}
return server, nil
}
// Heartbeat a metric for monitoring if we're the leader
func (d *AutopilotDelegate) NotifyHealth(health autopilot.OperatorHealthReply) {
func (d *AutopilotDelegate) NotifyState(state *autopilot.State) {
// emit metrics if we are the leader regarding overall healthiness and the failure tolerance
if d.server.raft.State() == raft.Leader {
metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
if health.Healthy {
metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(state.FailureTolerance))
if state.Healthy {
metrics.SetGauge([]string{"autopilot", "healthy"}, 1)
} else {
metrics.SetGauge([]string{"autopilot", "healthy"}, 0)
@ -64,23 +53,88 @@ func (d *AutopilotDelegate) NotifyHealth(health autopilot.OperatorHealthReply) {
}
}
func (d *AutopilotDelegate) PromoteNonVoters(conf *autopilot.Config, health autopilot.OperatorHealthReply) ([]raft.Server, error) {
future := d.server.raft.GetConfiguration()
if err := future.Error(); err != nil {
return nil, fmt.Errorf("failed to get raft configuration: %v", err)
func (d *AutopilotDelegate) RemoveFailedServer(srv *autopilot.Server) {
go func() {
if err := d.server.RemoveFailedNode(srv.Name, false); err != nil {
d.server.logger.Error("failedto remove server", "name", srv.Name, "id", srv.ID, "error", err)
}
}()
}
func (s *Server) initAutopilot(config *Config) {
apDelegate := &AutopilotDelegate{s}
s.autopilot = autopilot.New(
s.raft,
apDelegate,
autopilot.WithLogger(s.logger),
autopilot.WithReconcileInterval(config.AutopilotInterval),
autopilot.WithUpdateInterval(config.ServerHealthInterval),
autopilot.WithPromoter(s.autopilotPromoter()),
)
}
func (s *Server) autopilotServers() map[raft.ServerID]*autopilot.Server {
servers := make(map[raft.ServerID]*autopilot.Server)
for _, member := range s.serfLAN.Members() {
srv, err := s.autopilotServer(member)
if err != nil {
s.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
continue
} else if srv == nil {
// this member was a client
continue
}
servers[srv.ID] = srv
}
return autopilot.PromoteStableServers(conf, health, future.Configuration().Servers), nil
return servers
}
func (d *AutopilotDelegate) Raft() *raft.Raft {
return d.server.raft
func (s *Server) autopilotServer(m serf.Member) (*autopilot.Server, error) {
ok, srv := metadata.IsConsulServer(m)
if !ok {
return nil, nil
}
return s.autopilotServerFromMetadata(srv)
}
func (d *AutopilotDelegate) SerfLAN() *serf.Serf {
return d.server.serfLAN
}
func (s *Server) autopilotServerFromMetadata(srv *metadata.Server) (*autopilot.Server, error) {
server := &autopilot.Server{
Name: srv.ShortName,
ID: raft.ServerID(srv.ID),
Address: raft.ServerAddress(srv.Addr.String()),
Version: srv.Build.String(),
RaftVersion: srv.RaftVersion,
Ext: s.autopilotServerExt(srv),
}
func (d *AutopilotDelegate) SerfWAN() *serf.Serf {
return d.server.serfWAN
switch srv.Status {
case serf.StatusLeft:
server.NodeStatus = autopilot.NodeLeft
case serf.StatusAlive, serf.StatusLeaving:
// we want to treat leaving as alive to prevent autopilot from
// prematurely removing the node.
server.NodeStatus = autopilot.NodeAlive
case serf.StatusFailed:
server.NodeStatus = autopilot.NodeFailed
default:
server.NodeStatus = autopilot.NodeUnknown
}
// populate the node meta if there is any. When a node first joins or if
// there are ACL issues then this could be empty if the server has not
// yet been able to register itself in the catalog
_, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID))
if err != nil {
return nil, fmt.Errorf("error retrieving node from state store: %w", err)
}
if node != nil {
server.Meta = node.Meta
}
return server, nil
}

View File

@ -1,540 +0,0 @@
package autopilot
import (
"context"
"fmt"
"net"
"strconv"
"sync"
"time"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-version"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
// Delegate is the interface for the Autopilot mechanism
type Delegate interface {
AutopilotConfig() *Config
FetchStats(context.Context, []serf.Member) map[string]*ServerStats
IsServer(serf.Member) (*ServerInfo, error)
NotifyHealth(OperatorHealthReply)
PromoteNonVoters(*Config, OperatorHealthReply) ([]raft.Server, error)
Raft() *raft.Raft
SerfLAN() *serf.Serf
SerfWAN() *serf.Serf
}
// Autopilot is a mechanism for automatically managing the Raft
// quorum using server health information along with updates from Serf gossip.
// For more information, see https://www.consul.io/docs/guides/autopilot.html
type Autopilot struct {
logger hclog.Logger
delegate Delegate
interval time.Duration
healthInterval time.Duration
clusterHealth OperatorHealthReply
clusterHealthLock sync.RWMutex
enabled bool
removeDeadCh chan struct{}
shutdownCh chan struct{}
shutdownLock sync.Mutex
waitGroup sync.WaitGroup
}
type ServerInfo struct {
Name string
ID string
Addr net.Addr
Build version.Version
Status serf.MemberStatus
}
func NewAutopilot(logger hclog.Logger, delegate Delegate, interval, healthInterval time.Duration) *Autopilot {
return &Autopilot{
logger: logger.Named(logging.Autopilot),
delegate: delegate,
interval: interval,
healthInterval: healthInterval,
removeDeadCh: make(chan struct{}),
}
}
func (a *Autopilot) Start() {
a.shutdownLock.Lock()
defer a.shutdownLock.Unlock()
// Nothing to do
if a.enabled {
return
}
a.shutdownCh = make(chan struct{})
a.waitGroup = sync.WaitGroup{}
a.clusterHealth = OperatorHealthReply{}
a.waitGroup.Add(2)
go a.run()
go a.serverHealthLoop()
a.enabled = true
}
func (a *Autopilot) Stop() {
a.shutdownLock.Lock()
defer a.shutdownLock.Unlock()
// Nothing to do
if !a.enabled {
return
}
close(a.shutdownCh)
a.waitGroup.Wait()
a.enabled = false
}
// run periodically looks for nonvoting servers to promote and dead servers to remove.
func (a *Autopilot) run() {
defer a.waitGroup.Done()
// Monitor server health until shutdown
ticker := time.NewTicker(a.interval)
defer ticker.Stop()
for {
select {
case <-a.shutdownCh:
return
case <-ticker.C:
if err := a.promoteServers(); err != nil {
a.logger.Error("Error promoting servers", "error", err)
}
if err := a.pruneDeadServers(); err != nil {
a.logger.Error("Error checking for dead servers to remove", "error", err)
}
case <-a.removeDeadCh:
if err := a.pruneDeadServers(); err != nil {
a.logger.Error("Error checking for dead servers to remove", "error", err)
}
}
}
}
// promoteServers asks the delegate for any promotions and carries them out.
func (a *Autopilot) promoteServers() error {
conf := a.delegate.AutopilotConfig()
if conf == nil {
return nil
}
// Skip the non-voter promotions unless all servers support the new APIs
minRaftProtocol, err := a.MinRaftProtocol()
if err != nil {
return fmt.Errorf("error getting server raft protocol versions: %s", err)
}
if minRaftProtocol >= 3 {
promotions, err := a.delegate.PromoteNonVoters(conf, a.GetClusterHealth())
if err != nil {
return fmt.Errorf("error checking for non-voters to promote: %s", err)
}
if err := a.handlePromotions(promotions); err != nil {
return fmt.Errorf("error handling promotions: %s", err)
}
}
return nil
}
// fmtServer prints info about a server in a standard way for logging.
func fmtServer(server raft.Server) string {
return fmt.Sprintf("Server (ID: %q Address: %q)", server.ID, server.Address)
}
// NumPeers counts the number of voting peers in the given raft config.
func NumPeers(raftConfig raft.Configuration) int {
var numPeers int
for _, server := range raftConfig.Servers {
if server.Suffrage == raft.Voter {
numPeers++
}
}
return numPeers
}
// RemoveDeadServers triggers a pruning of dead servers in a non-blocking way.
func (a *Autopilot) RemoveDeadServers() {
select {
case a.removeDeadCh <- struct{}{}:
default:
}
}
func canRemoveServers(peers, minQuorum, deadServers int) (bool, string) {
if peers-deadServers < minQuorum {
return false, fmt.Sprintf("denied, because removing %d/%d servers would leave less then minimal allowed quorum of %d servers", deadServers, peers, minQuorum)
}
// Only do removals if a minority of servers will be affected.
// For failure tolerance of F we need n = 2F+1 servers.
// This means we can safely remove up to (n-1)/2 servers.
if deadServers > (peers-1)/2 {
return false, fmt.Sprintf("denied, because removing the majority of servers %d/%d is not safe", deadServers, peers)
}
return true, fmt.Sprintf("allowed, because removing %d/%d servers leaves a majority of servers above the minimal allowed quorum %d", deadServers, peers, minQuorum)
}
// pruneDeadServers removes up to numPeers/2 failed servers
func (a *Autopilot) pruneDeadServers() error {
conf := a.delegate.AutopilotConfig()
if conf == nil || !conf.CleanupDeadServers {
return nil
}
// Failed servers are known to Serf and marked failed, and stale servers
// are known to Raft but not Serf.
var failed []serf.Member
staleRaftServers := make(map[string]raft.Server)
raftNode := a.delegate.Raft()
future := raftNode.GetConfiguration()
if err := future.Error(); err != nil {
return err
}
raftConfig := future.Configuration()
for _, server := range raftConfig.Servers {
staleRaftServers[string(server.Address)] = server
}
serfWAN := a.delegate.SerfWAN()
serfLAN := a.delegate.SerfLAN()
for _, member := range serfLAN.Members() {
server, err := a.delegate.IsServer(member)
if err != nil {
a.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
continue
}
if server != nil {
// todo(kyhavlov): change this to index by UUID
s, found := staleRaftServers[server.Addr.String()]
if found {
delete(staleRaftServers, server.Addr.String())
}
if member.Status == serf.StatusFailed {
// If the node is a nonvoter, we can remove it immediately.
if found && s.Suffrage == raft.Nonvoter {
a.logger.Info("Attempting removal of failed server node", "name", member.Name)
go serfLAN.RemoveFailedNode(member.Name)
if serfWAN != nil {
go serfWAN.RemoveFailedNode(member.Name)
}
} else {
failed = append(failed, member)
}
}
}
}
deadServers := len(failed) + len(staleRaftServers)
// nothing to do
if deadServers == 0 {
return nil
}
if ok, msg := canRemoveServers(NumPeers(raftConfig), int(conf.MinQuorum), deadServers); !ok {
a.logger.Debug("Failed to remove dead servers", "error", msg)
return nil
}
for _, node := range failed {
a.logger.Info("Attempting removal of failed server node", "name", node.Name)
go serfLAN.RemoveFailedNode(node.Name)
if serfWAN != nil {
go serfWAN.RemoveFailedNode(fmt.Sprintf("%s.%s", node.Name, node.Tags["dc"]))
}
}
minRaftProtocol, err := a.MinRaftProtocol()
if err != nil {
return err
}
for _, raftServer := range staleRaftServers {
a.logger.Info("Attempting removal of stale server", "server", fmtServer(raftServer))
var future raft.Future
if minRaftProtocol >= 2 {
future = raftNode.RemoveServer(raftServer.ID, 0, 0)
} else {
future = raftNode.RemovePeer(raftServer.Address)
}
if err := future.Error(); err != nil {
return err
}
}
return nil
}
// MinRaftProtocol returns the lowest supported Raft protocol among alive servers
func (a *Autopilot) MinRaftProtocol() (int, error) {
return minRaftProtocol(a.delegate.SerfLAN().Members(), a.delegate.IsServer)
}
func minRaftProtocol(members []serf.Member, serverFunc func(serf.Member) (*ServerInfo, error)) (int, error) {
minVersion := -1
for _, m := range members {
if m.Status != serf.StatusAlive {
continue
}
server, err := serverFunc(m)
if err != nil {
return -1, err
}
if server == nil {
continue
}
vsn, ok := m.Tags["raft_vsn"]
if !ok {
vsn = "1"
}
raftVsn, err := strconv.Atoi(vsn)
if err != nil {
return -1, err
}
if minVersion == -1 || raftVsn < minVersion {
minVersion = raftVsn
}
}
if minVersion == -1 {
return minVersion, fmt.Errorf("No servers found")
}
return minVersion, nil
}
// handlePromotions is a helper shared with Consul Enterprise that attempts to
// apply desired server promotions to the Raft configuration.
func (a *Autopilot) handlePromotions(promotions []raft.Server) error {
// This used to wait to only promote to maintain an odd quorum of
// servers, but this was at odds with the dead server cleanup when doing
// rolling updates (add one new server, wait, and then kill an old
// server). The dead server cleanup would still count the old server as
// a peer, which is conservative and the right thing to do, and this
// would wait to promote, so you could get into a stalemate. It is safer
// to promote early than remove early, so by promoting as soon as
// possible we have chosen that as the solution here.
for _, server := range promotions {
a.logger.Info("Promoting server to voter", "server", fmtServer(server))
addFuture := a.delegate.Raft().AddVoter(server.ID, server.Address, 0, 0)
if err := addFuture.Error(); err != nil {
return fmt.Errorf("failed to add raft peer: %v", err)
}
}
// If we promoted a server, trigger a check to remove dead servers.
if len(promotions) > 0 {
select {
case a.removeDeadCh <- struct{}{}:
default:
}
}
return nil
}
// serverHealthLoop monitors the health of the servers in the cluster
func (a *Autopilot) serverHealthLoop() {
defer a.waitGroup.Done()
// Monitor server health until shutdown
ticker := time.NewTicker(a.healthInterval)
defer ticker.Stop()
for {
select {
case <-a.shutdownCh:
return
case <-ticker.C:
if err := a.updateClusterHealth(); err != nil {
a.logger.Error("Error updating cluster health", "error", err)
}
}
}
}
// updateClusterHealth fetches the Raft stats of the other servers and updates
// s.clusterHealth based on the configured Autopilot thresholds
func (a *Autopilot) updateClusterHealth() error {
// Don't do anything if the min Raft version is too low
minRaftProtocol, err := a.MinRaftProtocol()
if err != nil {
return fmt.Errorf("error getting server raft protocol versions: %s", err)
}
if minRaftProtocol < 3 {
return nil
}
autopilotConf := a.delegate.AutopilotConfig()
// Bail early if autopilot config hasn't been initialized yet
if autopilotConf == nil {
return nil
}
// Get the serf members which are Consul servers
var serverMembers []serf.Member
serverMap := make(map[string]*ServerInfo)
for _, member := range a.delegate.SerfLAN().Members() {
if member.Status == serf.StatusLeft {
continue
}
server, err := a.delegate.IsServer(member)
if err != nil {
a.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
continue
}
if server != nil {
serverMap[server.ID] = server
serverMembers = append(serverMembers, member)
}
}
raftNode := a.delegate.Raft()
future := raftNode.GetConfiguration()
if err := future.Error(); err != nil {
return fmt.Errorf("error getting Raft configuration %s", err)
}
servers := future.Configuration().Servers
// Fetch the health for each of the servers in parallel so we get as
// consistent of a sample as possible. We capture the leader's index
// here as well so it roughly lines up with the same point in time.
targetLastIndex := raftNode.LastIndex()
d := time.Now().Add(a.healthInterval / 2)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
fetchedStats := a.delegate.FetchStats(ctx, serverMembers)
// Build a current list of server healths
leader := raftNode.Leader()
var clusterHealth OperatorHealthReply
voterCount := 0
healthyCount := 0
healthyVoterCount := 0
for _, server := range servers {
health := ServerHealth{
ID: string(server.ID),
Address: string(server.Address),
Leader: server.Address == leader,
LastContact: -1,
Voter: server.Suffrage == raft.Voter,
}
parts, ok := serverMap[string(server.ID)]
if ok {
health.Name = parts.Name
health.SerfStatus = parts.Status
health.Version = parts.Build.String()
if stats, ok := fetchedStats[string(server.ID)]; ok {
if err := a.updateServerHealth(&health, parts, stats, autopilotConf, targetLastIndex); err != nil {
a.logger.Warn("Error updating server health", "server", fmtServer(server), "error", err)
}
}
} else {
health.SerfStatus = serf.StatusNone
}
if health.Voter {
voterCount++
}
if health.Healthy {
healthyCount++
if health.Voter {
healthyVoterCount++
}
}
clusterHealth.Servers = append(clusterHealth.Servers, health)
}
clusterHealth.Healthy = healthyCount == len(servers)
// If we have extra healthy voters, update FailureTolerance
requiredQuorum := voterCount/2 + 1
if healthyVoterCount > requiredQuorum {
clusterHealth.FailureTolerance = healthyVoterCount - requiredQuorum
}
a.delegate.NotifyHealth(clusterHealth)
a.clusterHealthLock.Lock()
a.clusterHealth = clusterHealth
a.clusterHealthLock.Unlock()
return nil
}
// updateServerHealth computes the resulting health of the server based on its
// fetched stats and the state of the leader.
func (a *Autopilot) updateServerHealth(health *ServerHealth,
server *ServerInfo, stats *ServerStats,
autopilotConf *Config, targetLastIndex uint64) error {
health.LastTerm = stats.LastTerm
health.LastIndex = stats.LastIndex
if stats.LastContact != "never" {
var err error
health.LastContact, err = time.ParseDuration(stats.LastContact)
if err != nil {
return fmt.Errorf("error parsing last_contact duration: %s", err)
}
}
raftNode := a.delegate.Raft()
lastTerm, err := strconv.ParseUint(raftNode.Stats()["last_log_term"], 10, 64)
if err != nil {
return fmt.Errorf("error parsing last_log_term: %s", err)
}
health.Healthy = health.IsHealthy(lastTerm, targetLastIndex, autopilotConf)
// If this is a new server or the health changed, reset StableSince
lastHealth := a.GetServerHealth(server.ID)
if lastHealth == nil || lastHealth.Healthy != health.Healthy {
health.StableSince = time.Now()
} else {
health.StableSince = lastHealth.StableSince
}
return nil
}
func (a *Autopilot) GetClusterHealth() OperatorHealthReply {
a.clusterHealthLock.RLock()
defer a.clusterHealthLock.RUnlock()
return a.clusterHealth
}
func (a *Autopilot) GetServerHealth(id string) *ServerHealth {
a.clusterHealthLock.RLock()
defer a.clusterHealthLock.RUnlock()
return a.clusterHealth.ServerHealth(id)
}
func IsPotentialVoter(suffrage raft.ServerSuffrage) bool {
switch suffrage {
case raft.Voter, raft.Staging:
return true
default:
return false
}
}

View File

@ -1,111 +0,0 @@
package autopilot
import (
"errors"
"net"
"testing"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/require"
)
func TestMinRaftProtocol(t *testing.T) {
t.Parallel()
makeMember := func(version string) serf.Member {
return serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
Tags: map[string]string{
"role": "consul",
"dc": "dc1",
"port": "10000",
"vsn": "1",
"raft_vsn": version,
},
Status: serf.StatusAlive,
}
}
cases := []struct {
members []serf.Member
expected int
err error
}{
// No servers, error
{
members: []serf.Member{},
expected: -1,
err: errors.New("No servers found"),
},
// One server
{
members: []serf.Member{
makeMember("1"),
},
expected: 1,
},
// One server, bad version formatting
{
members: []serf.Member{
makeMember("asdf"),
},
expected: -1,
err: errors.New(`strconv.Atoi: parsing "asdf": invalid syntax`),
},
// Multiple servers, different versions
{
members: []serf.Member{
makeMember("1"),
makeMember("2"),
},
expected: 1,
},
// Multiple servers, same version
{
members: []serf.Member{
makeMember("2"),
makeMember("2"),
},
expected: 2,
},
}
serverFunc := func(m serf.Member) (*ServerInfo, error) {
return &ServerInfo{}, nil
}
for _, tc := range cases {
result, err := minRaftProtocol(tc.members, serverFunc)
if result != tc.expected {
t.Fatalf("bad: %v, %v, %v", result, tc.expected, tc)
}
if tc.err != nil {
if err == nil || tc.err.Error() != err.Error() {
t.Fatalf("bad: %v, %v, %v", err, tc.err, tc)
}
}
}
}
func TestAutopilot_canRemoveServers(t *testing.T) {
type test struct {
peers int
minQuorum int
deadServers int
ok bool
}
tests := []test{
{1, 1, 1, false},
{3, 3, 1, false},
{4, 3, 3, false},
{5, 3, 3, false},
{5, 3, 2, true},
{5, 3, 1, true},
{9, 3, 5, false},
}
for _, test := range tests {
ok, msg := canRemoveServers(test.peers, test.minQuorum, test.deadServers)
require.Equal(t, test.ok, ok)
t.Logf("%+v: %s", test, msg)
}
}

View File

@ -1,26 +0,0 @@
package autopilot
import (
"time"
"github.com/hashicorp/raft"
)
// PromoteStableServers is a basic autopilot promotion policy that promotes any
// server which has been healthy and stable for the duration specified in the
// given Autopilot config.
func PromoteStableServers(autopilotConfig *Config, health OperatorHealthReply, servers []raft.Server) []raft.Server {
// Find any non-voters eligible for promotion.
now := time.Now()
var promotions []raft.Server
for _, server := range servers {
if !IsPotentialVoter(server.Suffrage) {
health := health.ServerHealth(string(server.ID))
if health.IsStable(now, autopilotConfig) {
promotions = append(promotions, server)
}
}
}
return promotions
}

View File

@ -1,102 +0,0 @@
package autopilot
import (
"testing"
"time"
"github.com/hashicorp/raft"
"github.com/stretchr/testify/require"
)
func TestPromotion(t *testing.T) {
config := &Config{
LastContactThreshold: 5 * time.Second,
MaxTrailingLogs: 100,
ServerStabilizationTime: 3 * time.Second,
}
cases := []struct {
name string
conf *Config
health OperatorHealthReply
servers []raft.Server
promotions []raft.Server
}{
{
name: "one stable voter, no promotions",
conf: config,
health: OperatorHealthReply{
Servers: []ServerHealth{
{
ID: "a",
Healthy: true,
StableSince: time.Now().Add(-10 * time.Second),
},
},
},
servers: []raft.Server{
{ID: "a", Suffrage: raft.Voter},
},
},
{
name: "one stable nonvoter, should be promoted",
conf: config,
health: OperatorHealthReply{
Servers: []ServerHealth{
{
ID: "a",
Healthy: true,
StableSince: time.Now().Add(-10 * time.Second),
},
{
ID: "b",
Healthy: true,
StableSince: time.Now().Add(-10 * time.Second),
},
},
},
servers: []raft.Server{
{ID: "a", Suffrage: raft.Voter},
{ID: "b", Suffrage: raft.Nonvoter},
},
promotions: []raft.Server{
{ID: "b", Suffrage: raft.Nonvoter},
},
},
{
name: "unstable servers, neither should be promoted",
conf: config,
health: OperatorHealthReply{
Servers: []ServerHealth{
{
ID: "a",
Healthy: true,
StableSince: time.Now().Add(-10 * time.Second),
},
{
ID: "b",
Healthy: false,
StableSince: time.Now().Add(-10 * time.Second),
},
{
ID: "c",
Healthy: true,
StableSince: time.Now().Add(-1 * time.Second),
},
},
},
servers: []raft.Server{
{ID: "a", Suffrage: raft.Voter},
{ID: "b", Suffrage: raft.Nonvoter},
{ID: "c", Suffrage: raft.Nonvoter},
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
promotions := PromoteStableServers(tc.conf, tc.health, tc.servers)
require.Equal(t, tc.promotions, promotions)
})
}
}

View File

@ -1,94 +0,0 @@
package autopilot
import (
"testing"
"time"
"github.com/hashicorp/serf/serf"
)
func TestServerHealth_IsHealthy(t *testing.T) {
cases := []struct {
health ServerHealth
lastTerm uint64
lastIndex uint64
conf Config
expected bool
}{
// Healthy server, all values within allowed limits
{
health: ServerHealth{SerfStatus: serf.StatusAlive, LastTerm: 1, LastIndex: 0},
lastTerm: 1,
lastIndex: 10,
conf: Config{MaxTrailingLogs: 20},
expected: true,
},
// Serf status failed
{
health: ServerHealth{SerfStatus: serf.StatusFailed},
expected: false,
},
// Old value for lastTerm
{
health: ServerHealth{SerfStatus: serf.StatusAlive, LastTerm: 0},
lastTerm: 1,
expected: false,
},
// Too far behind on logs
{
health: ServerHealth{SerfStatus: serf.StatusAlive, LastIndex: 0},
lastIndex: 10,
conf: Config{MaxTrailingLogs: 5},
expected: false,
},
}
for index, tc := range cases {
actual := tc.health.IsHealthy(tc.lastTerm, tc.lastIndex, &tc.conf)
if actual != tc.expected {
t.Fatalf("bad value for case %d: %v", index, actual)
}
}
}
func TestServerHealth_IsStable(t *testing.T) {
start := time.Now()
cases := []struct {
health *ServerHealth
now time.Time
conf Config
expected bool
}{
// Healthy server, all values within allowed limits
{
health: &ServerHealth{Healthy: true, StableSince: start},
now: start.Add(15 * time.Second),
conf: Config{ServerStabilizationTime: 10 * time.Second},
expected: true,
},
// Unhealthy server
{
health: &ServerHealth{Healthy: false},
expected: false,
},
// Healthy server, hasn't reached stabilization time
{
health: &ServerHealth{Healthy: true, StableSince: start},
now: start.Add(5 * time.Second),
conf: Config{ServerStabilizationTime: 10 * time.Second},
expected: false,
},
// Nil struct
{
health: nil,
expected: false,
},
}
for index, tc := range cases {
actual := tc.health.IsStable(tc.now, &tc.conf)
if actual != tc.expected {
t.Fatalf("bad value for case %d: %v", index, actual)
}
}
}

View File

@ -2,9 +2,15 @@
package consul
import "github.com/hashicorp/consul/agent/consul/autopilot"
import (
"github.com/hashicorp/consul/agent/metadata"
autopilot "github.com/hashicorp/raft-autopilot"
)
func (s *Server) initAutopilot(config *Config) {
apDelegate := &AutopilotDelegate{s}
s.autopilot = autopilot.NewAutopilot(s.logger, apDelegate, config.AutopilotInterval, config.ServerHealthInterval)
func (s *Server) autopilotPromoter() autopilot.Promoter {
return autopilot.DefaultPromoter()
}
func (_ *Server) autopilotServerExt(_ *metadata.Server) interface{} {
return nil
}

View File

@ -1,10 +1,12 @@
package consul
import (
"context"
"os"
"testing"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/raft"
@ -18,28 +20,20 @@ func TestAutopilot_IdempotentShutdown(t *testing.T) {
defer s1.Shutdown()
retry.Run(t, func(r *retry.R) { r.Check(waitForLeader(s1)) })
s1.autopilot.Start()
s1.autopilot.Start()
s1.autopilot.Start()
s1.autopilot.Stop()
s1.autopilot.Stop()
s1.autopilot.Stop()
s1.autopilot.Start(context.Background())
s1.autopilot.Start(context.Background())
s1.autopilot.Start(context.Background())
<-s1.autopilot.Stop()
<-s1.autopilot.Stop()
<-s1.autopilot.Stop()
}
func TestAutopilot_CleanupDeadServer(t *testing.T) {
t.Parallel()
for i := 1; i <= 3; i++ {
testCleanupDeadServer(t, i)
}
}
func testCleanupDeadServer(t *testing.T, raftVersion int) {
dc := "dc1"
conf := func(c *Config) {
c.Datacenter = dc
c.Bootstrap = false
c.BootstrapExpect = 5
c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(raftVersion)
}
dir1, s1 := testServerWithConfig(t, conf)
defer os.RemoveAll(dir1)
@ -119,10 +113,19 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
}
func TestAutopilot_CleanupDeadNonvoter(t *testing.T) {
dir1, s1 := testServer(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.AutopilotConfig = &structs.AutopilotConfig{
CleanupDeadServers: true,
ServerStabilizationTime: 100 * time.Millisecond,
}
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
// we have to wait for autopilot to be running long enough for the server stabilization time
// to kick in for this test to work.
time.Sleep(100 * time.Millisecond)
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
@ -316,7 +319,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
}
// Verify we have 4 peers
peers, err := s1.numPeers()
peers, err := s1.autopilot.NumVoters()
if err != nil {
t.Fatal(err)
}
@ -335,7 +338,6 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = true
c.RaftConfig.ProtocolVersion = 3
c.AutopilotConfig.ServerStabilizationTime = 200 * time.Millisecond
c.ServerHealthInterval = 100 * time.Millisecond
c.AutopilotInterval = 100 * time.Millisecond
@ -346,6 +348,10 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// this may seem arbitrary but we need to get past the server stabilization time
// so that we start factoring in that time for newly connected nodes.
time.Sleep(100 * time.Millisecond)
dir2, s2 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = false
@ -370,7 +376,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
if servers[1].Suffrage != raft.Nonvoter {
r.Fatalf("bad: %v", servers)
}
health := s1.autopilot.GetServerHealth(string(servers[1].ID))
health := s1.autopilot.GetServerHealth(servers[1].ID)
if health == nil {
r.Fatal("nil health")
}
@ -406,7 +412,6 @@ func TestAutopilot_MinQuorum(t *testing.T) {
c.Bootstrap = false
c.BootstrapExpect = 4
c.AutopilotConfig.MinQuorum = 3
c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(2)
c.AutopilotInterval = 100 * time.Millisecond
}
dir1, s1 := testServerWithConfig(t, conf)

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
@ -17,6 +18,52 @@ import (
"github.com/hashicorp/go-uuid"
)
var CatalogCounters = []prometheus.CounterDefinition{
{
Name: []string{"catalog", "service", "query"},
Help: "Increments for each catalog query for the given service.",
},
{
Name: []string{"catalog", "connect", "query"},
Help: "",
},
{
Name: []string{"catalog", "service", "query-tag"},
Help: "Increments for each catalog query for the given service with the given tag.",
},
{
Name: []string{"catalog", "connect", "query-tag"},
Help: "",
},
{
Name: []string{"catalog", "service", "query-tags"},
Help: "Increments for each catalog query for the given service with the given tags.",
},
{
Name: []string{"catalog", "connect", "query-tags"},
Help: "",
},
{
Name: []string{"catalog", "service", "not-found"},
Help: "Increments for each catalog query where the given service could not be found.",
},
{
Name: []string{"catalog", "connect", "not-found"},
Help: "",
},
}
var CatalogSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"catalog", "deregister"},
Help: "Measures the time it takes to complete a catalog deregister operation.",
},
{
Name: []string{"catalog", "register"},
Help: "Measures the time it takes to complete a catalog register operation.",
},
}
// Catalog endpoint is used to manipulate the service catalog
type Catalog struct {
srv *Server

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/structs"
@ -21,6 +22,21 @@ import (
"golang.org/x/time/rate"
)
var ClientCounters = []prometheus.CounterDefinition{
{
Name: []string{"client", "rpc"},
Help: "Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.",
},
{
Name: []string{"client", "rpc", "exceeded"},
Help: "Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.",
},
{
Name: []string{"client", "rpc", "failed"},
Help: "Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.",
},
}
const (
// serfEventBacklog is the maximum number of unprocessed Serf Events
// that will be held in queue before new serf events block. A

View File

@ -145,8 +145,8 @@ func TestClient_LANReap(t *testing.T) {
// Check the router has both
retry.Run(t, func(r *retry.R) {
server := c1.router.FindLANServer()
require.NotNil(t, server)
require.Equal(t, s1.config.NodeName, server.Name)
require.NotNil(r, server)
require.Equal(r, s1.config.NodeName, server.Name)
})
// shutdown the second dc

View File

@ -12,7 +12,6 @@ import (
"golang.org/x/time/rate"
"github.com/hashicorp/consul/agent/checks"
"github.com/hashicorp/consul/agent/consul/autopilot"
"github.com/hashicorp/consul/agent/structs"
libserf "github.com/hashicorp/consul/lib/serf"
"github.com/hashicorp/consul/tlsutil"
@ -111,9 +110,9 @@ type Config struct {
// RaftConfig is the configuration used for Raft in the local DC
RaftConfig *raft.Config
// (Enterprise-only) NonVoter is used to prevent this server from being added
// (Enterprise-only) ReadReplica is used to prevent this server from being added
// as a voting member of the Raft cluster.
NonVoter bool
ReadReplica bool
// NotifyListen is called after the RPC listener has been configured.
// RPCAdvertise will be set to the listener address if it hasn't been
@ -269,8 +268,8 @@ type Config struct {
// ACLDefaultPolicy is used to control the ACL interaction when
// there is no defined policy. This can be "allow" which means
// ACLs are used to black-list, or "deny" which means ACLs are
// white-lists.
// ACLs are used to deny-list, or "deny" which means ACLs are
// allow-lists.
ACLDefaultPolicy string
// ACLDownPolicy controls the behavior of ACLs if the ACLDatacenter
@ -438,7 +437,7 @@ type Config struct {
// AutopilotConfig is used to apply the initial autopilot config when
// bootstrapping.
AutopilotConfig *autopilot.Config
AutopilotConfig *structs.AutopilotConfig
// ServerHealthInterval is the frequency with which the health of the
// servers in the cluster will be updated.
@ -590,7 +589,7 @@ func DefaultConfig() *Config {
// TODO (slackpad) - Until #3744 is done, we need to keep these
// in sync with agent/config/default.go.
AutopilotConfig: &autopilot.Config{
AutopilotConfig: &structs.AutopilotConfig{
CleanupDeadServers: true,
LastContactThreshold: 200 * time.Millisecond,
MaxTrailingLogs: 250,

View File

@ -4,6 +4,8 @@ import (
"fmt"
"time"
"github.com/armon/go-metrics/prometheus"
metrics "github.com/armon/go-metrics"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
@ -12,6 +14,33 @@ import (
"github.com/mitchellh/copystructure"
)
var ConfigSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"config_entry", "apply"},
Help: "",
},
{
Name: []string{"config_entry", "get"},
Help: "",
},
{
Name: []string{"config_entry", "list"},
Help: "",
},
{
Name: []string{"config_entry", "listAll"},
Help: "",
},
{
Name: []string{"config_entry", "delete"},
Help: "",
},
{
Name: []string{"config_entry", "resolve_service_config"},
Help: "",
},
}
// The ConfigEntry endpoint is used to query centralized config information
type ConfigEntry struct {
srv *Server
@ -19,10 +48,6 @@ type ConfigEntry struct {
// Apply does an upsert of the given config entry.
func (c *ConfigEntry) Apply(args *structs.ConfigEntryRequest, reply *bool) error {
return c.applyInternal(args, reply, nil)
}
func (c *ConfigEntry) applyInternal(args *structs.ConfigEntryRequest, reply *bool, normalizeAndValidateFn func(structs.ConfigEntry) error) error {
if err := c.srv.validateEnterpriseRequest(args.Entry.GetEnterpriseMeta(), true); err != nil {
return err
}
@ -47,17 +72,11 @@ func (c *ConfigEntry) applyInternal(args *structs.ConfigEntryRequest, reply *boo
}
// Normalize and validate the incoming config entry as if it came from a user.
if normalizeAndValidateFn == nil {
if err := args.Entry.Normalize(); err != nil {
return err
}
if err := args.Entry.Validate(); err != nil {
return err
}
} else {
if err := normalizeAndValidateFn(args.Entry); err != nil {
return err
}
if err := args.Entry.Normalize(); err != nil {
return err
}
if err := args.Entry.Validate(); err != nil {
return err
}
if authz != nil && !args.Entry.CanWrite(authz) {
@ -454,6 +473,11 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
func (c *ConfigEntry) preflightCheck(kind string) error {
switch kind {
case structs.ServiceIntentions:
// Exit early if Connect hasn't been enabled.
if !c.srv.config.ConnectEnabled {
return ErrConnectNotEnabled
}
usingConfigEntries, err := c.srv.fsm.State().AreIntentionsInConfigEntries()
if err != nil {
return fmt.Errorf("system metadata lookup failed: %v", err)

View File

@ -11,12 +11,8 @@ import (
"github.com/hashicorp/go-hclog"
)
func cmpConfigLess(first structs.ConfigEntry, second structs.ConfigEntry) bool {
return first.GetKind() < second.GetKind() || (first.GetKind() == second.GetKind() && first.GetName() < second.GetName())
}
func configSort(configs []structs.ConfigEntry) {
sort.Slice(configs, func(i, j int) bool {
sort.SliceStable(configs, func(i, j int) bool {
return cmpConfigLess(configs[i], configs[j])
})
}
@ -25,12 +21,14 @@ func diffConfigEntries(local []structs.ConfigEntry, remote []structs.ConfigEntry
configSort(local)
configSort(remote)
var deletions []structs.ConfigEntry
var updates []structs.ConfigEntry
var localIdx int
var remoteIdx int
var (
deletions []structs.ConfigEntry
updates []structs.ConfigEntry
localIdx int
remoteIdx int
)
for localIdx, remoteIdx = 0, 0; localIdx < len(local) && remoteIdx < len(remote); {
if local[localIdx].GetKind() == remote[remoteIdx].GetKind() && local[localIdx].GetName() == remote[remoteIdx].GetName() {
if configSameID(local[localIdx], remote[remoteIdx]) {
// config is in both the local and remote state - need to check raft indices
if remote[remoteIdx].GetRaftIndex().ModifyIndex > lastRemoteIndex {
updates = append(updates, remote[remoteIdx])
@ -64,6 +62,30 @@ func diffConfigEntries(local []structs.ConfigEntry, remote []structs.ConfigEntry
return deletions, updates
}
func cmpConfigLess(first structs.ConfigEntry, second structs.ConfigEntry) bool {
if first.GetKind() < second.GetKind() {
return true
}
if first.GetKind() > second.GetKind() {
return false
}
if first.GetEnterpriseMeta().LessThan(second.GetEnterpriseMeta()) {
return true
}
if second.GetEnterpriseMeta().LessThan(first.GetEnterpriseMeta()) {
return false
}
return first.GetName() < second.GetName()
}
func configSameID(e1, e2 structs.ConfigEntry) bool {
return e1.GetKind() == e2.GetKind() &&
e1.GetEnterpriseMeta().IsSame(e2.GetEnterpriseMeta()) &&
e1.GetName() == e2.GetName()
}
func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.ConfigEntry, op structs.ConfigEntryOp) (bool, error) {
ticker := time.NewTicker(time.Second / time.Duration(s.config.ConfigReplicationApplyLimit))
defer ticker.Stop()

View File

@ -4,6 +4,7 @@ import (
"fmt"
"os"
"testing"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
@ -11,6 +12,85 @@ import (
"github.com/stretchr/testify/require"
)
func TestReplication_ConfigSort(t *testing.T) {
newDefaults := func(name, protocol string) *structs.ServiceConfigEntry {
return &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: name,
Protocol: protocol,
}
}
newResolver := func(name string, timeout time.Duration) *structs.ServiceResolverConfigEntry {
return &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: name,
ConnectTimeout: timeout,
}
}
type testcase struct {
configs []structs.ConfigEntry
expect []structs.ConfigEntry
}
cases := map[string]testcase{
"none": {},
"one": {
configs: []structs.ConfigEntry{
newDefaults("web", "grpc"),
},
expect: []structs.ConfigEntry{
newDefaults("web", "grpc"),
},
},
"just kinds": {
configs: []structs.ConfigEntry{
newResolver("web", 33*time.Second),
newDefaults("web", "grpc"),
},
expect: []structs.ConfigEntry{
newDefaults("web", "grpc"),
newResolver("web", 33*time.Second),
},
},
"just names": {
configs: []structs.ConfigEntry{
newDefaults("db", "grpc"),
newDefaults("api", "http2"),
},
expect: []structs.ConfigEntry{
newDefaults("api", "http2"),
newDefaults("db", "grpc"),
},
},
"all": {
configs: []structs.ConfigEntry{
newResolver("web", 33*time.Second),
newDefaults("web", "grpc"),
newDefaults("db", "grpc"),
newDefaults("api", "http2"),
},
expect: []structs.ConfigEntry{
newDefaults("api", "http2"),
newDefaults("db", "grpc"),
newDefaults("web", "grpc"),
newResolver("web", 33*time.Second),
},
},
}
for name, tc := range cases {
tc := tc
t.Run(name, func(t *testing.T) {
configSort(tc.configs)
require.Equal(t, tc.expect, tc.configs)
// and it should be stable
configSort(tc.configs)
require.Equal(t, tc.expect, tc.configs)
})
}
}
func TestReplication_ConfigEntries(t *testing.T) {
t.Parallel()
dir1, s1 := testServerWithConfig(t, func(c *Config) {

View File

@ -67,7 +67,7 @@ func (s *ConnectCA) ConfigurationGet(
if err != nil {
return err
}
if rule != nil && rule.OperatorRead(nil) != acl.Allow {
if rule != nil && rule.OperatorWrite(nil) != acl.Allow {
return acl.ErrPermissionDenied
}

View File

@ -9,8 +9,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
ca "github.com/hashicorp/consul/agent/connect/ca"
"github.com/hashicorp/consul/agent/structs"
@ -18,6 +17,7 @@ import (
"github.com/hashicorp/consul/testrpc"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func testParseCert(t *testing.T, pemValue string) *x509.Certificate {
@ -144,6 +144,93 @@ func TestConnectCAConfig_GetSet(t *testing.T) {
}
}
func TestConnectCAConfig_GetSet_ACLDeny(t *testing.T) {
t.Parallel()
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.ACLDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLMasterToken = TestDefaultMasterToken
c.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
opReadToken, err := upsertTestTokenWithPolicyRules(
codec, TestDefaultMasterToken, "dc1", `operator = "read"`)
require.NoError(t, err)
opWriteToken, err := upsertTestTokenWithPolicyRules(
codec, TestDefaultMasterToken, "dc1", `operator = "write"`)
require.NoError(t, err)
// Update a config value
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
"PrivateKey": `
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49
AwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav
q5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==
-----END EC PRIVATE KEY-----`,
"RootCert": `
-----BEGIN CERTIFICATE-----
MIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ
VGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG
A1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR
AB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7
SkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD
AgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6
NDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6
NWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf
ZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6
ZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw
WQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1
NTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG
SM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA
pY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=
-----END CERTIFICATE-----`,
},
}
args := &structs.CARequest{
Datacenter: "dc1",
Config: newConfig,
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
}
var reply interface{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
t.Run("deny get with operator:read", func(t *testing.T) {
args := &structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Token: opReadToken.SecretID},
}
var reply structs.CAConfiguration
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)
assert.True(t, acl.IsErrPermissionDenied(err))
})
t.Run("allow get with operator:write", func(t *testing.T) {
args := &structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Token: opWriteToken.SecretID},
}
var reply structs.CAConfiguration
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)
assert.False(t, acl.IsErrPermissionDenied(err))
assert.Equal(t, newConfig.Config, reply.Config)
})
}
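
Seen from a client, the OperatorWrite check above means the CA configuration (whose Config map can embed the provider's private key) is no longer readable with a plain operator:read token. A hedged sketch against a local agent using the Go api client's Connect().CAGetConfig; the token secrets below are placeholders, not real values.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder secrets; in practice these come from tokens created with
	// `operator = "read"` and `operator = "write"` policies respectively.
	const readToken, writeToken = "read-token", "write-token"

	// With only operator:read this now fails, since the response may embed
	// the CA provider's private key.
	if _, _, err := client.Connect().CAGetConfig(&api.QueryOptions{Token: readToken}); err != nil {
		fmt.Println("operator:read:", err) // expect a 403 Permission denied
	}

	// operator:write can still read (and update) the CA configuration.
	conf, _, err := client.Connect().CAGetConfig(&api.QueryOptions{Token: writeToken})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("provider:", conf.Provider)
}
```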
// This test case verifies that the logic around forcing a rotation without cross
// signing works when requested (and is denied when not requested). This occurs
// if the current CA is not able to cross sign external CA certificates.

View File

@ -5,13 +5,33 @@ import (
"fmt"
"time"
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
memdb "github.com/hashicorp/go-memdb"
)
var FederationStateSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"federation_state", "apply"},
Help: "",
},
{
Name: []string{"federation_state", "get"},
Help: "",
},
{
Name: []string{"federation_state", "list"},
Help: "",
},
{
Name: []string{"federation_state", "list_mesh_gateways"},
Help: "",
},
}
var (
errFederationStatesNotEnabled = errors.New("Federation states are currently disabled until all servers in the datacenter support the feature")
)
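
The summary definitions above only pre-declare the metric keys and help text; samples are still recorded at the RPC call sites with the usual go-metrics pattern. A stand-in sketch (not the actual endpoint code) of how one of these keys gets fed:

```go
package main

import (
	"time"

	"github.com/armon/go-metrics"
)

// applyFederationState stands in for the real FederationState.Apply RPC
// handler; the only point is that the "federation_state.apply" key declared
// above is fed by MeasureSince at the call site.
func applyFederationState() {
	defer metrics.MeasureSince([]string{"federation_state", "apply"}, time.Now())
	// ... forward to the leader, check ACLs, raft-apply the request ...
}

func main() {
	applyFederationState()
}
```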

View File

@ -4,11 +4,102 @@ import (
"fmt"
"time"
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
)
var CommandsSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"fsm", "register"},
Help: "Measures the time it takes to apply a catalog register operation to the FSM.",
},
{
Name: []string{"fsm", "deregister"},
Help: "Measures the time it takes to apply a catalog deregister operation to the FSM.",
},
{
Name: []string{"fsm", "kvs"},
Help: "Measures the time it takes to apply the given KV operation to the FSM.",
},
{
Name: []string{"fsm", "session"},
Help: "Measures the time it takes to apply the given session operation to the FSM.",
},
{
Name: []string{"fsm", "acl"},
Help: "Measures the time it takes to apply the given ACL operation to the FSM.",
},
{
Name: []string{"fsm", "tombstone"},
Help: "Measures the time it takes to apply the given tombstone operation to the FSM.",
},
{
Name: []string{"fsm", "coordinate", "batch-update"},
Help: "Measures the time it takes to apply the given batch coordinate update to the FSM.",
},
{
Name: []string{"fsm", "prepared-query"},
Help: "Measures the time it takes to apply the given prepared query update operation to the FSM.",
},
{
Name: []string{"fsm", "txn"},
Help: "Measures the time it takes to apply the given transaction update to the FSM.",
},
{
Name: []string{"fsm", "autopilot"},
Help: "Measures the time it takes to apply the given autopilot update to the FSM.",
},
{
Name: []string{"consul", "fsm", "intention"},
Help: "",
},
{
Name: []string{"fsm", "intention"},
Help: "",
},
{
Name: []string{"consul", "fsm", "ca"},
Help: "",
},
{
Name: []string{"fsm", "ca", "leaf"},
Help: "",
},
{
Name: []string{"fsm", "acl", "token"},
Help: "",
},
{
Name: []string{"fsm", "ca", "leaf"},
Help: "",
},
{
Name: []string{"fsm", "acl", "policy"},
Help: "",
},
{
Name: []string{"fsm", "acl", "bindingrule"},
Help: "",
},
{
Name: []string{"fsm", "acl", "authmethod"},
Help: "",
},
{
Name: []string{"fsm", "system_metadata"},
Help: "",
},
// TODO(kit): We generate the config-entry fsm summaries by reading off of the request. It is
// possible to statically declare these when we know all of the names, but I didn't get to it
// in this patch. Config-entries are known though and we should add these in the future.
// {
// Name: []string{"fsm", "config_entry", req.Entry.GetKind()},
// Help: "",
// },
}
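
Collecting the definitions into package-level slices lets them be handed to the Prometheus sink at startup, so the summaries exist with help text before the first sample arrives. A sketch of that wiring, assuming go-metrics' prometheus sink exposes PrometheusOpts and NewPrometheusSinkFrom as in the version vendored here:

```go
package main

import (
	"log"
	"time"

	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
)

func main() {
	// In the agent this slice would be built from CommandsSummaries,
	// FederationStateSummaries, etc.; one inline entry keeps the sketch small.
	defs := []prometheus.SummaryDefinition{
		{Name: []string{"fsm", "register"}, Help: "Time to apply a catalog register operation."},
	}

	sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
		Expiration:         60 * time.Second,
		SummaryDefinitions: defs, // registered with help text before any sample arrives
	})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("consul"), sink); err != nil {
		log.Fatal(err)
	}

	// Samples recorded under a defined key update the pre-registered summary.
	metrics.MeasureSince([]string{"fsm", "register"}, time.Now())
}
```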
func init() {
registerCommand(structs.RegisterRequestType, (*FSM).applyRegister)
registerCommand(structs.DeregisterRequestType, (*FSM).applyDeregister)
@ -291,6 +382,11 @@ func (c *FSM) applyIntentionOperation(buf []byte, index uint64) interface{} {
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
defer metrics.MeasureSinceWithLabels([]string{"fsm", "intention"}, time.Now(),
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
if req.Mutation != nil {
return c.state.IntentionMutation(index, req.Op, req.Mutation)
}
switch req.Op {
case structs.IntentionOpCreate, structs.IntentionOpUpdate:
//nolint:staticcheck

View File

@ -10,7 +10,6 @@ import (
"github.com/golang/protobuf/proto"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/autopilot"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"
@ -1163,7 +1162,7 @@ func TestFSM_Autopilot(t *testing.T) {
// Set the autopilot config using a request.
req := structs.AutopilotSetConfigRequest{
Datacenter: "dc1",
Config: autopilot.Config{
Config: structs.AutopilotConfig{
CleanupDeadServers: true,
LastContactThreshold: 10 * time.Second,
MaxTrailingLogs: 300,

View File

@ -6,13 +6,14 @@ import (
"sync"
"time"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/raft"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
)
// command is a command method on the FSM.
@ -41,7 +42,9 @@ func registerCommand(msg structs.MessageType, fn unboundCommand) {
// along with Raft to provide strong consistency. We implement
// this outside the Server to avoid exposing this outside the package.
type FSM struct {
logger hclog.Logger
deps Deps
logger hclog.Logger
chunker *raftchunking.ChunkingFSM
// apply is built off the commands global and is used to route apply
// operations to their appropriate handlers.
@ -53,28 +56,40 @@ type FSM struct {
// Raft side, so doesn't need to lock this.
stateLock sync.RWMutex
state *state.Store
gc *state.TombstoneGC
chunker *raftchunking.ChunkingFSM
}
// New is used to construct a new FSM with a blank state.
//
// Deprecated: use NewFromDeps.
func New(gc *state.TombstoneGC, logger hclog.Logger) (*FSM, error) {
if logger == nil {
logger = hclog.New(&hclog.LoggerOptions{})
newStateStore := func() *state.Store {
return state.NewStateStore(gc)
}
return NewFromDeps(Deps{Logger: logger, NewStateStore: newStateStore}), nil
}
stateNew, err := state.NewStateStore(gc)
if err != nil {
return nil, err
// Deps are dependencies used to construct the FSM.
type Deps struct {
// Logger used to emit log messages
Logger hclog.Logger
// NewStateStore returns a state.Store which the FSM will use to make changes
// to the state.
// NewStateStore will be called once when the FSM is created and again any
// time Restore() is called.
NewStateStore func() *state.Store
}
// NewFromDeps creates a new FSM from its dependencies.
func NewFromDeps(deps Deps) *FSM {
if deps.Logger == nil {
deps.Logger = hclog.New(&hclog.LoggerOptions{})
}
fsm := &FSM{
logger: logger.Named(logging.FSM),
deps: deps,
logger: deps.Logger.Named(logging.FSM),
apply: make(map[structs.MessageType]command),
state: stateNew,
gc: gc,
state: deps.NewStateStore(),
}
// Build out the apply dispatch table based on the registered commands.
@ -86,8 +101,7 @@ func New(gc *state.TombstoneGC, logger hclog.Logger) (*FSM, error) {
}
fsm.chunker = raftchunking.NewChunkingFSM(fsm, nil)
return fsm, nil
return fsm
}
func (c *FSM) ChunkingFSM() *raftchunking.ChunkingFSM {
@ -149,11 +163,7 @@ func (c *FSM) Snapshot() (raft.FSMSnapshot, error) {
func (c *FSM) Restore(old io.ReadCloser) error {
defer old.Close()
// Create a new state store.
stateNew, err := state.NewStateStore(c.gc)
if err != nil {
return err
}
stateNew := c.deps.NewStateStore()
// Set up a new restore transaction
restore := stateNew.Restore()
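
A sketch of what the new constructor looks like from a caller's side, for example a test that wants a throwaway state store per FSM; it assumes only the fsm and state packages shown in this diff.

```go
package main

import (
	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/agent/consul/fsm"
	"github.com/hashicorp/consul/agent/consul/state"
)

func main() {
	f := fsm.NewFromDeps(fsm.Deps{
		Logger: hclog.New(&hclog.LoggerOptions{Name: "fsm"}),
		// Called once at construction and again on every Restore(), so each
		// call must hand back a fresh store; nil tombstone GC keeps this minimal.
		NewStateStore: func() *state.Store {
			return state.NewStateStore(nil)
		},
	})
	_ = f // wire into raft, snapshot/restore, etc.
}
```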

Some files were not shown because too many files have changed in this diff.