Merge remote-tracking branch 'origin/main' into serve-panic-recovery

commit 52c8b4994b
@@ -0,0 +1,3 @@
```release-note:bug
connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults.
```

@@ -1,3 +0,0 @@
```release-note:improvement
config: Allow ${} style interpolation for UI Dashboard template URLs
```

@@ -0,0 +1,3 @@
```release-note:feature
ui: Adding support of Consul API Gateway as an external source.
```

@@ -0,0 +1,9 @@
```release-note:improvement
ci: Artifact builds will now only run on merges to the release branches or to `main`
```

```release-note:improvement
ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64
```

```release-note:improvement
ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux]
```

@@ -0,0 +1,3 @@
```release-note:improvement
config: warn the user if client_addr is empty because client services won't be listening
```
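Note: `client_addr` is the agent option the warning above refers to; a minimal sketch of setting it explicitly (the bind address and file path below are placeholders, not taken from this diff):

```shell
# Example only: bind the client interfaces (HTTP/DNS) explicitly instead of
# leaving client_addr empty.
cat > /etc/consul.d/client.hcl <<'EOF'
client_addr = "0.0.0.0"
EOF
```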
@@ -0,0 +1,3 @@
```release-note:bug
ui: **(Enterprise only)** When no namespace is selected, make sure to default to the tokens default namespace when requesting permissions
```

@@ -0,0 +1,6 @@
```release-note:bug
ui: code editor styling (layout consistency + wide screen support)
```

```release-note:improvement
ui: added copy to clipboard button in code editor toolbars
```
@@ -0,0 +1,4 @@
```release-note:bug
ui: Filter the global intentions list by the currently selected partition rather
than a wildcard
```
@@ -0,0 +1,3 @@
```release-note:improvement
ui: When switching partitions reset the namespace back to the tokens default namespace or default
```

@@ -0,0 +1,3 @@
```release-note:improvement
sdk: Add support for iptable rules that allow DNS lookup redirection to Consul DNS.
```

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Update UI browser support to 'roughly ~2 years back'
```

@@ -0,0 +1,6 @@
```release-note:improvement
connect/ca: Return an error when querying roots from uninitialized CA.
```

```release-note:bug
connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers.
```

@@ -0,0 +1,4 @@
```release-note:bug
ui: Revert to depending on the backend, 'post-user-action', to report
permissions errors rather than using UI capabilities 'pre-user-action'
```

@@ -0,0 +1,3 @@
```release-note:bug
xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection
```

@@ -0,0 +1,3 @@
```release-note:improvement
partitions: Prevent writing partition-exports entries to secondary DCs.
```

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Add upstream icons for upstreams and upstream instances
```

@@ -0,0 +1,3 @@
```release-note:improvement
connect: **(Enterprise only)** Allow ingress gateways to target services in another partition
```

@@ -0,0 +1,3 @@
```release-note:enhancement
api: responses that contain only a partial subset of results, due to filtering by ACL policies, may now include an `X-Consul-Results-Filtered-By-ACLs` header
```
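Note: a minimal sketch of how a client could look for the new header on a list endpoint, assuming a local agent at 127.0.0.1:8500 and that the catalog services endpoint is one of the endpoints that emits it:

```shell
# Dump response headers only and check for the ACL-filtering marker.
curl -sS -o /dev/null -D - \
  -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" \
  http://127.0.0.1:8500/v1/catalog/services \
  | grep -i 'x-consul-results-filtered-by-acls'
```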
@@ -0,0 +1,5 @@
```release-note:improvement
connect: Support Vault auth methods for the Connect CA Vault provider. Currently, we support any non-deprecated auth methods
the latest version of Vault supports (v1.8.5), which include AppRole, AliCloud, AWS, Azure, Cloud Foundry, GitHub, Google Cloud,
JWT/OIDC, Kerberos, Kubernetes, LDAP, Oracle Cloud Infrastructure, Okta, Radius, TLS Certificates, and Username & Password.
```

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Update global notification styling
```

@@ -0,0 +1,3 @@
```release-note:bug
ui: Fix inline-code brand styling
```

@@ -0,0 +1,3 @@
```release-note:bug
api: ensure new partition fields are omit empty for compatibility with older versions of consul
```

@@ -0,0 +1,6 @@
```release-note:bug
windows: fixes arm and arm64 builds
```

```release-note:bug
macos: fixes building with a non-Apple LLVM (such as installed via Homebrew)
```

@@ -0,0 +1,3 @@
```release-note:bug
ui: Ensure the UI stores the default partition for the users token
```

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Add partition support for SSO
```

@@ -0,0 +1,3 @@
```release-note:improvement
agent: **(Enterprise only)** purge service/check registration files for incorrect partitions on reload
```

@@ -0,0 +1,3 @@
```release-note:bug
ui: Include `Service.Namespace` into available variables for `dashboard_url_templates`
```

@@ -0,0 +1,3 @@
```release-note:improvement
types: add TLSVersion and TLSCipherSuite
```

@@ -0,0 +1,3 @@
```release-note:feature
ui: Include `Service.Partition` into available variables for `dashboard_url_templates`
```
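Note: the two `dashboard_url_templates` entries above add `Service.Namespace` and `Service.Partition` as template variables. A rough sketch of a UI config that uses them follows; the `ui_config` block shape, the `{{...}}` placeholder syntax, and the Grafana URL are assumptions for illustration only:

```shell
# Sketch only: field names and placeholder syntax are assumptions, not taken from this diff.
cat > /etc/consul.d/ui.hcl <<'EOF'
ui_config {
  dashboard_url_templates {
    service = "https://grafana.example.com/d/abc?var-svc={{Service.Name}}&var-ns={{Service.Namespace}}&var-ap={{Service.Partition}}"
  }
}
EOF
```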
@@ -0,0 +1,3 @@
```release-note:feature
ui: Don't offer a 'Valid Datacenters' option when editing policies for non-default partitions
```

@@ -0,0 +1,3 @@
```release-note:deprecation
config: `acl.tokens.master` has been renamed to `acl.tokens.initial_management`, and `acl.tokens.agent_master` has been renamed to `acl.tokens.agent_recovery` - the old field names are now deprecated and will be removed in a future major release
```
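Note: a minimal agent-config sketch using the new field names from the deprecation entry above; the token values and file path are placeholders:

```shell
cat > /etc/consul.d/acl-tokens.hcl <<'EOF'
acl {
  tokens {
    initial_management = "REPLACE_WITH_INITIAL_MANAGEMENT_TOKEN" # was acl.tokens.master
    agent_recovery     = "REPLACE_WITH_AGENT_RECOVERY_TOKEN"     # was acl.tokens.agent_master
  }
}
EOF
```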
@@ -0,0 +1,3 @@
```release-note:feature
ui: Upgrade Lock Sessions to use partitions
```

@@ -0,0 +1,3 @@
```release-note:feature
ui: Add documentation link to Partition empty state
```

@@ -0,0 +1,6 @@
```release-note:deprecation
api: `/v1/agent/token/agent_master` is deprecated and will be removed in a future major release - use `/v1/agent/token/agent_recovery` instead
```

```release-note:breaking-change
cli: `consul acl set-agent-token master` has been replaced with `consul acl set-agent-token recovery`
```
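Note: the replacement CLI command and HTTP endpoint named above, shown together; the JSON body shape follows the existing agent token API and the addresses/token values are assumptions:

```shell
# New CLI form (replaces `consul acl set-agent-token master`):
consul acl set-agent-token recovery "${RECOVERY_TOKEN}"

# New HTTP endpoint (replaces /v1/agent/token/agent_master):
curl -sS -X PUT \
  -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" \
  -d '{"Token": "REPLACE_WITH_AGENT_RECOVERY_TOKEN"}' \
  http://127.0.0.1:8500/v1/agent/token/agent_recovery
```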
@@ -0,0 +1,3 @@
```release-note:bug
ui: Fix visual issue with slight table header overflow
```

@@ -0,0 +1,3 @@
```release-note:bug
ca: fixes a bug that caused the intermediate cert used to sign leaf certs to be missing from the /connect/ca/roots API response when the Vault provider was used.
```

@@ -0,0 +1,3 @@
```release-note:bug
ca: fixes a bug that caused the SigningKeyID to be wrong in the primary DC, when the Vault provider is used, after a CA config creates a new root.
```

@@ -0,0 +1,3 @@
```release-note:feature
ui: Adds support for partitions to the Routing visualization.
```

@@ -0,0 +1,3 @@
```release-note:improvement
server: block enterprise-specific partition-exports config entry from being used in OSS Consul.
```
@@ -0,0 +1,4 @@
```release-note:bug
ui: Fixes an issue where, under some circumstances, after logging in we present the
data loaded previous to you logging in.
```
@@ -0,0 +1,3 @@
```release-note:bug
ca: fixes a bug that caused non blocking leaf cert queries to return the same cached response regardless of ca rotation or leaf cert expiry
```

@@ -0,0 +1,3 @@
```release-note:feature
ui: Adds support for partitions to Service and Node Identity template visuals.
```

@@ -0,0 +1,3 @@
```release-note:improvement
auto-config: ensure the feature works properly with partitions
```

@@ -0,0 +1,3 @@
```release-note:feature
ui: Adds basic support for showing Services exported from another partition.
```

@@ -0,0 +1,11 @@
```release-note:improvement
raft: Use bbolt instead of the legacy boltdb implementation
```

```release-note:improvement
raft: Emit boltdb related performance metrics
```

```release-note:improvement
raft: Added a configuration to disable boltdb freelist syncing
```

@@ -0,0 +1,3 @@
```release-note:improvement
agent: add variation of force-leave that exclusively works on the WAN
```

@@ -0,0 +1,3 @@
```release-note:improvement
connect: Consul will now generate a unique virtual IP for each connect-enabled service (this will also differ across namespace/partition in Enterprise).
```

@@ -0,0 +1,3 @@
```release-note:improvement
dns: Added a `virtual` endpoint for querying the assigned virtual IP for a service.
```
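Note: a quick way to exercise the new `virtual` endpoint against a local agent; the `<service>.virtual.consul` query shape, the default DNS port (8600), and the service name `web` are assumptions:

```shell
# Ask the agent's DNS interface for the virtual IP assigned to a connect-enabled service.
dig @127.0.0.1 -p 8600 web.virtual.consul +short
```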
@@ -0,0 +1,3 @@
```release-note:improvement
partitions: **(Enterprise only)** rename APIs, commands, and public types to use "partition" rather than "admin partition".
```

@@ -0,0 +1,3 @@
```release-note:improvement
connect: **(Enterprise only)** add support for cross-partition transparent proxying.
```

@@ -0,0 +1,3 @@
```release-note:improvement
api: **(Enterprise Only)** rename partition-exports config entry to exported-services.
```

@@ -0,0 +1,3 @@
```release-note:note
Renamed the `agent_master` field to `agent_recovery` in the `acl-tokens.json` file in which tokens are persisted on-disk (when `acl.enable_token_persistence` is enabled)
```

@@ -0,0 +1,3 @@
```release-note:bug
areas: **(Enterprise only)** make the gRPC server tracker network area aware
```

@@ -0,0 +1,3 @@
```release-note:improvement
connect: **(Enterprise only)** add support for targeting partitions in discovery chain routes, splits, and redirects.
```

@@ -0,0 +1,3 @@
```release-note:feature
partitions: **(Enterprise only)** segment serf LAN gossip between nodes in different partitions
```

@@ -0,0 +1,3 @@
```release-note:security
namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
```

@@ -0,0 +1,3 @@
```release-note:bug
snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. via config or an environment variable)
```

@@ -0,0 +1,3 @@
```release-note:bug
snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files
```

@@ -0,0 +1,3 @@
```release-note:bug
acl: **(Enterprise only)** fix namespace and namespace_prefix policy evaluation when both govern an authz request
```

@@ -0,0 +1,3 @@
```release-note:improvement
namespaces: **(Enterprise only)** policy and role defaults can reference policies in any namespace in the same partition by ID
```

@@ -0,0 +1,3 @@
```release-note:bug
areas: **(Enterprise Only)** Fixes a bug when using the Yamux pool (for servers version 1.7.3 and later) where the entire pool was locked while connecting to a remote location, which could potentially take a long time.
```

@@ -0,0 +1,3 @@
```release-note:feature
partitions: **(Enterprise only)** Ensure partitions and serf-based WAN federation are mutually exclusive.
```
@@ -22,7 +22,7 @@ references:
  test-results: &TEST_RESULTS_DIR /tmp/test-results

  cache:
-   yarn: &YARN_CACHE_KEY consul-ui-v5-{{ checksum "ui/yarn.lock" }}
+   yarn: &YARN_CACHE_KEY consul-ui-v6-{{ checksum "ui/yarn.lock" }}
    rubygem: &RUBYGEM_CACHE_KEY static-site-gems-v1-{{ checksum "Gemfile.lock" }}

  environment: &ENVIRONMENT
@@ -113,7 +113,7 @@ jobs:
      - image: *GOLANG_IMAGE
    steps:
      - checkout
-     - run: go get -u github.com/hashicorp/lint-consul-retry && lint-consul-retry
+     - run: go install github.com/hashicorp/lint-consul-retry@master && lint-consul-retry
      - run: *notify-slack-failure

  lint:
@@ -184,9 +184,8 @@ jobs:
          name: Install gogo/protobuf
          command: |
            gogo_version=$(go list -m github.com/gogo/protobuf | awk '{print $2}')
-           mkdir -p .gotools; cd .gotools; go mod init consul-tools
-           go get -v github.com/hashicorp/protoc-gen-go-binary
-           go get -v github.com/gogo/protobuf/protoc-gen-gofast@${gogo_version}
+           go install -v github.com/hashicorp/protoc-gen-go-binary@master
+           go install -v github.com/gogo/protobuf/protoc-gen-gofast@${gogo_version}
      - run:
          command: make --always-make proto
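Note: the CI changes above replace `go get -u`/`go get -v` tool installation with versioned `go install` invocations. With Go 1.16+, `go install pkg@version` fetches, builds, and installs the named tool into GOBIN (or GOPATH/bin) without needing a throwaway module or touching go.mod, for example:

```shell
go install github.com/hashicorp/lint-consul-retry@master
go install github.com/hashicorp/protoc-gen-go-binary@master
```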
@@ -3,9 +3,9 @@ contact_links:
  - name: Consul Community Support
    url: https://discuss.hashicorp.com/c/consul/29
    about: If you have a question, or are looking for advice, please post on our Discuss forum! The community loves to chime in to help. Happy Coding!
-  - name: Consul-Helm GitHub Issues
-    url: https://github.com/hashicorp/consul-helm
-    about: Are you submitting an issue or feature enhancement for the Consul helm chart? Please post in the Consul-Helm GitHub Issues.
+  - name: Consul on Kubernetes GitHub Issues
+    url: https://github.com/hashicorp/consul-k8s
+    about: Are you submitting an issue or feature enhancement for the Consul Helm chart? Please post in the consul-k8s GitHub Issues.
  - name: Consul Learn Tracks
    url: https://learn.hashicorp.com/consul?track=getting-started#getting-started
    about: Please check out our Learn Guides. These hands on guides deal with many of the tasks common to using Consul
@@ -3,30 +3,55 @@ updates:
  - package-ecosystem: gomod
    open-pull-requests-limit: 5
    directory: "/"
+   labels:
+     - "go"
+     - "dependencies"
+     - "pr/no-changelog"
    schedule:
      interval: daily
  - package-ecosystem: gomod
    open-pull-requests-limit: 5
    directory: "/api"
+   labels:
+     - "go"
+     - "dependencies"
+     - "pr/no-changelog"
    schedule:
      interval: daily
  - package-ecosystem: gomod
    open-pull-requests-limit: 5
    directory: "/sdk"
+   labels:
+     - "go"
+     - "dependencies"
+     - "pr/no-changelog"
    schedule:
      interval: daily
  - package-ecosystem: npm
    open-pull-requests-limit: 5
    directory: "/ui"
+   labels:
+     - "javascript"
+     - "dependencies"
+     - "pr/no-changelog"
    schedule:
      interval: daily
  - package-ecosystem: npm
    open-pull-requests-limit: 5
    directory: "/website"
+   labels:
+     - "javascript"
+     - "dependencies"
+     - "type/docs-cherrypick"
+     - "pr/no-changelog"
    schedule:
      interval: daily
  - package-ecosystem: github-actions
    open-pull-requests-limit: 5
    directory: /
+   labels:
+     - "github_actions"
+     - "dependencies"
+     - "pr/no-changelog"
    schedule:
      interval: daily
@@ -3,7 +3,7 @@ name: build
on:
  push:
    # Sequence of patterns matched against refs/heads
-   branches: [ crt-release-migration-1.11.x ]
+   branches: [ main ]

env:
  PKG_NAME: consul
@@ -97,8 +97,21 @@ event "sign" {
  }
}

-event "verify" {
+event "sign-linux-rpms" {
  depends = ["sign"]
+  action "sign-linux-rpms" {
+    organization = "hashicorp"
+    repository = "crt-workflows-common"
+    workflow = "sign-linux-rpms"
+  }
+
+  notification {
+    on = "fail"
+  }
+}
+
+event "verify" {
+  depends = ["sign-linux-rpms"]
  action "verify" {
    organization = "hashicorp"
    repository = "crt-workflows-common"
CHANGELOG.md (217 changed lines)
@@ -1,7 +1,141 @@
-## UNRELEASED
+## 1.11.0-beta3 (November 17, 2021)
+
+SECURITY:
+
+* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)]
+* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
+
+FEATURES:
+
+* ca: Add a configurable TTL for Connect CA root certificates. The configuration is supported by the Vault and Consul providers. [[GH-11428](https://github.com/hashicorp/consul/issues/11428)]
+* ca: Add a configurable TTL to the AWS ACM Private CA provider root certificate. [[GH-11449](https://github.com/hashicorp/consul/issues/11449)]
+* health-checks: add support for h2c in http2 ping health checks [[GH-10690](https://github.com/hashicorp/consul/issues/10690)]
+* partitions: **(Enterprise only)** segment serf LAN gossip between nodes in different partitions
+* ui: Adding support of Consul API Gateway as an external source. [[GH-11371](https://github.com/hashicorp/consul/issues/11371)]
+* ui: Topology - New views for scenarios where no dependencies exist or ACLs are disabled [[GH-11280](https://github.com/hashicorp/consul/issues/11280)]
+
+IMPROVEMENTS:
+
+* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* config: warn the user if client_addr is empty because client services won't be listening [[GH-11461](https://github.com/hashicorp/consul/issues/11461)]
+* connect/ca: Return an error when querying roots from uninitialized CA. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)]
+* connect: **(Enterprise only)** Allow ingress gateways to target services in another partition [[GH-11566](https://github.com/hashicorp/consul/issues/11566)]
+* connect: add Namespace configuration setting for Vault CA provider [[GH-11477](https://github.com/hashicorp/consul/issues/11477)]
+* namespaces: **(Enterprise only)** policy and role defaults can reference policies in any namespace in the same partition by ID
+* partitions: Prevent writing partition-exports entries to secondary DCs. [[GH-11541](https://github.com/hashicorp/consul/issues/11541)]
+* sdk: Add support for iptable rules that allow DNS lookup redirection to Consul DNS. [[GH-11480](https://github.com/hashicorp/consul/issues/11480)]
+* segments: **(Enterprise only)** ensure that the serf_lan_allowed_cidrs applies to network segments [[GH-11495](https://github.com/hashicorp/consul/issues/11495)]
+* ui: Add upstream icons for upstreams and upstream instances [[GH-11556](https://github.com/hashicorp/consul/issues/11556)]
+* ui: Update UI browser support to 'roughly ~2 years back' [[GH-11505](https://github.com/hashicorp/consul/issues/11505)]
+* ui: When switching partitions reset the namespace back to the tokens default namespace or default [[GH-11479](https://github.com/hashicorp/consul/issues/11479)]
+* ui: added copy to clipboard button in code editor toolbars [[GH-11474](https://github.com/hashicorp/consul/issues/11474)]

 BUG FIXES:

+* acl: **(Enterprise only)** fix namespace and namespace_prefix policy evaluation when both govern an authz request
+* api: ensure new partition fields are omit empty for compatibility with older versions of consul [[GH-11585](https://github.com/hashicorp/consul/issues/11585)]
+* connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)]
+* connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults. [[GH-11245](https://github.com/hashicorp/consul/issues/11245)]
+* macos: fixes building with a non-Apple LLVM (such as installed via Homebrew) [[GH-11586](https://github.com/hashicorp/consul/issues/11586)]
+* namespaces: **(Enterprise only)** ensure the namespace replicator doesn't replicate deleted namespaces
+* partitions: **(Enterprise only)** fix panic when forwarding delete operations to the leader
+* snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files
+* snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. via config or an environment variable)
+* state: **(Enterprise Only)** ensure partition delete triggers namespace deletes
+* ui: **(Enterprise only)** When no namespace is selected, make sure to default to the tokens default namespace when requesting permissions [[GH-11472](https://github.com/hashicorp/consul/issues/11472)]
+* ui: Ensure the UI stores the default partition for the users token [[GH-11591](https://github.com/hashicorp/consul/issues/11591)]
+* ui: Ensure we check intention permissions for specific services when deciding
+  whether to show action buttons for per service intention actions [[GH-11409](https://github.com/hashicorp/consul/issues/11409)]
+* ui: Filter the global intentions list by the currently selected partition rather
+  than a wildcard [[GH-11475](https://github.com/hashicorp/consul/issues/11475)]
+* ui: Revert to depending on the backend, 'post-user-action', to report
+  permissions errors rather than using UI capabilities 'pre-user-action' [[GH-11520](https://github.com/hashicorp/consul/issues/11520)]
+* ui: code editor styling (layout consistency + wide screen support) [[GH-11474](https://github.com/hashicorp/consul/issues/11474)]
+* windows: fixes arm and arm64 builds [[GH-11586](https://github.com/hashicorp/consul/issues/11586)]
+* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)]
+
+## 1.11.0-beta2 (November 02, 2021)
+
+BREAKING CHANGES:
+
+* acl: The legacy ACL system that was deprecated in Consul 1.4.0 has been removed. Before upgrading you should verify that nothing is still using the legacy ACL system. See the [Migrate Legacy ACL Tokens Learn Guide](https://learn.hashicorp.com/tutorials/consul/access-control-token-migration) for more information. [[GH-11232](https://github.com/hashicorp/consul/issues/11232)]
+
+IMPROVEMENTS:
+
+* agent: for various /v1/agent endpoints parse the partition parameter on the request [[GH-11444](https://github.com/hashicorp/consul/issues/11444)]
+* agent: refactor the agent delegate interface to be partition friendly [[GH-11429](https://github.com/hashicorp/consul/issues/11429)]
+* cli: Add `-cas` and `-modify-index` flags to the `consul config delete` command to support Check-And-Set (CAS) deletion of config entries [[GH-11419](https://github.com/hashicorp/consul/issues/11419)]
+* cli: update consul members output to display partitions and sort the results usefully [[GH-11446](https://github.com/hashicorp/consul/issues/11446)]
+* config: Allow ${} style interpolation for UI Dashboard template URLs [[GH-11328](https://github.com/hashicorp/consul/issues/11328)]
+* config: Support Check-And-Set (CAS) deletion of config entries [[GH-11419](https://github.com/hashicorp/consul/issues/11419)]
+* connect: **(Enterprise only)** add support for dialing upstreams in remote partitions through mesh gateways. [[GH-11431](https://github.com/hashicorp/consul/issues/11431)]
+* connect: **(Enterprise only)** updates ServiceRead and NodeRead to account for the partition-exports config entry. [[GH-11433](https://github.com/hashicorp/consul/issues/11433)]
+* connect: ingress gateways may now enable built-in TLS for a subset of listeners. [[GH-11163](https://github.com/hashicorp/consul/issues/11163)]
+* connect: service-resolver subset filters are validated for valid go-bexpr syntax on write [[GH-11293](https://github.com/hashicorp/consul/issues/11293)]
+* connect: update supported envoy versions to 1.20.0, 1.19.1, 1.18.4, 1.17.4 [[GH-11277](https://github.com/hashicorp/consul/issues/11277)]
+
+DEPRECATIONS:
+
+* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-11364](https://github.com/hashicorp/consul/issues/11364)]
+
+BUG FIXES:
+
+* api: fixed backwards compatibility issue with AgentService SocketPath field. [[GH-11318](https://github.com/hashicorp/consul/issues/11318)]
+* dns: Fixed an issue where on DNS requests made with .alt_domain response was returned as .domain [[GH-11348](https://github.com/hashicorp/consul/issues/11348)]
+* raft: do not trigger an election if not part of the servers list. [[GH-11375](https://github.com/hashicorp/consul/issues/11375)]
+* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)]
+* telemetry: fixes a bug with Prometheus consul_autopilot_failure_tolerance metric where 0 is reported instead of NaN on follower servers. [[GH-11399](https://github.com/hashicorp/consul/issues/11399)]
+* ui: Ensure dc selector correctly shows the currently selected dc [[GH-11380](https://github.com/hashicorp/consul/issues/11380)]
+* ui: Ensure we filter tokens by policy when showing which tokens use a certain
+  policy whilst editing a policy [[GH-11311](https://github.com/hashicorp/consul/issues/11311)]
+
+## 1.11.0-beta1 (October 15, 2021)
+
+FEATURES:
+
+* partitions: allow for partition queries to be forwarded [[GH-11099](https://github.com/hashicorp/consul/issues/11099)]
+* sso/oidc: **(Enterprise only)** Add support for providing acr_values in OIDC auth flow [[GH-11026](https://github.com/hashicorp/consul/issues/11026)]
+* ui: Added initial support for admin partition CRUD [[GH-11188](https://github.com/hashicorp/consul/issues/11188)]
+
+IMPROVEMENTS:
+
+* api: add partition field to acl structs [[GH-11080](https://github.com/hashicorp/consul/issues/11080)]
+* audit-logging: **(Enterprise Only)** Audit logs will now include select HTTP headers in each logs payload. Those headers are: `Forwarded`, `Via`, `X-Forwarded-For`, `X-Forwarded-Host` and `X-Forwarded-Proto`. [[GH-11107](https://github.com/hashicorp/consul/issues/11107)]
+* connect: Add low-level feature to allow an Ingress to retrieve TLS certificates from SDS. [[GH-10903](https://github.com/hashicorp/consul/issues/10903)]
+* connect: update supported envoy versions to 1.19.1, 1.18.4, 1.17.4, 1.16.5 [[GH-11115](https://github.com/hashicorp/consul/issues/11115)]
+* state: reads of partitions now accept an optional memdb.WatchSet
+* telemetry: Add new metrics for the count of KV entries in the Consul store. [[GH-11090](https://github.com/hashicorp/consul/issues/11090)]
+* telemetry: Add new metrics for the count of connect service instances and configuration entries. [[GH-11222](https://github.com/hashicorp/consul/issues/11222)]
+* ui: Add initial support for partitions to intentions [[GH-11129](https://github.com/hashicorp/consul/issues/11129)]
+* ui: Add uri guard to prevent future URL encoding issues [[GH-11117](https://github.com/hashicorp/consul/issues/11117)]
+* ui: Move the majority of our SASS variables to use native CSS custom
+  properties [[GH-11200](https://github.com/hashicorp/consul/issues/11200)]
+* ui: Removed informational panel from the namespace selector menu when editing
+  namespaces [[GH-11130](https://github.com/hashicorp/consul/issues/11130)]
+
+BUG FIXES:
+
+* acl: **(Enterprise only)** Fix bug in 'consul members' filtering with partitions. [[GH-11263](https://github.com/hashicorp/consul/issues/11263)]
+* acl: **(Enterprise only)** ensure that auth methods with namespace rules work with partitions [[GH-11323](https://github.com/hashicorp/consul/issues/11323)]
+* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)]
+* connect: Fix upstream listener escape hatch for prepared queries [[GH-11109](https://github.com/hashicorp/consul/issues/11109)]
+* grpc: strip local ACL tokens from RPCs during forwarding if crossing datacenters [[GH-11099](https://github.com/hashicorp/consul/issues/11099)]
+* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools
+* telemetry: Consul Clients no longer emit Autopilot metrics. [[GH-11241](https://github.com/hashicorp/consul/issues/11241)]
+* telemetry: fixes a bug with Prometheus consul_autopilot_healthy metric where 0 is reported instead of NaN on servers. [[GH-11231](https://github.com/hashicorp/consul/issues/11231)]
+* ui: **(Enterprise Only)** Fix saving intentions with namespaced source/destination [[GH-11095](https://github.com/hashicorp/consul/issues/11095)]
+* ui: Don't show a CRD warning for read-only intentions [[GH-11149](https://github.com/hashicorp/consul/issues/11149)]
+* ui: Ensure all types of data get reconciled with the backend data [[GH-11237](https://github.com/hashicorp/consul/issues/11237)]
+* ui: Fixed styling of Role remove dialog on the Token edit page [[GH-11298](https://github.com/hashicorp/consul/issues/11298)]
+* ui: Gracefully recover from non-existent DC errors [[GH-11077](https://github.com/hashicorp/consul/issues/11077)]
+* ui: Ignore reported permissions for KV area meaning the KV is always enabled
+  for both read/write access if the HTTP API allows. [[GH-10916](https://github.com/hashicorp/consul/issues/10916)]
+* ui: Topology - Fix up Default Allow and Permissive Intentions notices [[GH-11216](https://github.com/hashicorp/consul/issues/11216)]
+* ui: hide create button for policies/roles/namespace if users token has no write permissions to those areas [[GH-10914](https://github.com/hashicorp/consul/issues/10914)]
+* xds: ensure the active streams counters are 64 bit aligned on 32 bit systems [[GH-11085](https://github.com/hashicorp/consul/issues/11085)]
+* xds: fixed a bug where Envoy sidecars could enter a state where they failed to receive xds updates from Consul [[GH-10987](https://github.com/hashicorp/consul/issues/10987)]
 * Fixing SOA record to return proper domain when alt domain in use. [[GH-10431]](https://github.com/hashicorp/consul/pull/10431)

 ## 1.11.0-alpha (September 16, 2021)
@@ -51,6 +185,44 @@ manage licenses on older servers [[GH-10952](https://github.com/hashicorp/consul
 * use the MaxQueryTime instead of RPCHoldTimeout for blocking RPC queries
   [[GH-8978](https://github.com/hashicorp/consul/pull/8978)]. [[GH-10299](https://github.com/hashicorp/consul/issues/10299)]
+
+## 1.10.4 (November 11, 2021)
+
+SECURITY:
+
+* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)]
+* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
+
+IMPROVEMENTS:
+
+* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* connect/ca: Return an error when querying roots from uninitialized CA. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)]
+* telemetry: Add new metrics for the count of connect service instances and configuration entries. [[GH-11222](https://github.com/hashicorp/consul/issues/11222)]
+
+BUG FIXES:
+
+* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)]
+* api: fixed backwards compatibility issue with AgentService SocketPath field. [[GH-11318](https://github.com/hashicorp/consul/issues/11318)]
+* connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)]
+* connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults. [[GH-11245](https://github.com/hashicorp/consul/issues/11245)]
+* raft: do not trigger an election if not part of the servers list. [[GH-11375](https://github.com/hashicorp/consul/issues/11375)]
+* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)]
+* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools
+* snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. via config or an environment variable)
+* telemetry: Consul Clients no longer emit Autopilot metrics. [[GH-11241](https://github.com/hashicorp/consul/issues/11241)]
+* telemetry: fixes a bug with Prometheus consul_autopilot_failure_tolerance metric where 0 is reported instead of NaN on follower servers. [[GH-11399](https://github.com/hashicorp/consul/issues/11399)]
+* telemetry: fixes a bug with Prometheus consul_autopilot_healthy metric where 0 is reported instead of NaN on servers. [[GH-11231](https://github.com/hashicorp/consul/issues/11231)]
+* ui: **(Enterprise only)** When no namespace is selected, make sure to default to the tokens default namespace when requesting permissions [[GH-11472](https://github.com/hashicorp/consul/issues/11472)]
+* ui: Ensure we check intention permissions for specific services when deciding
+  whether to show action buttons for per service intention actions [[GH-11270](https://github.com/hashicorp/consul/issues/11270)]
+* ui: Fixed styling of Role remove dialog on the Token edit page [[GH-11298](https://github.com/hashicorp/consul/issues/11298)]
+* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)]
+
+BUG FIXES:
+
+* Fixing SOA record to return proper domain when alt domain in use. [[GH-10431]](https://github.com/hashicorp/consul/pull/10431)
+
 ## 1.10.3 (September 27, 2021)

 FEATURES:
@@ -281,6 +453,28 @@ NOTES:
 * legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive
+
+## 1.9.11 (November 11, 2021)
+
+SECURITY:
+
+* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)]
+* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
+
+IMPROVEMENTS:
+
+* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* telemetry: Add new metrics for the count of connect service instances and configuration entries. [[GH-11222](https://github.com/hashicorp/consul/issues/11222)]
+
+BUG FIXES:
+
+* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)]
+* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)]
+* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools
+* ui: Fixed styling of Role remove dialog on the Token edit page [[GH-11298](https://github.com/hashicorp/consul/issues/11298)]
+* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)]
+
 ## 1.9.10 (September 27, 2021)

 FEATURES:
@@ -639,6 +833,27 @@ BUG FIXES:
 * telemetry: fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area` [[GH-8685](https://github.com/hashicorp/consul/issues/8685)]
 * ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)]
+
+## 1.8.17 (November 11, 2021)
+
+SECURITY:
+
+* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
+
+IMPROVEMENTS:
+
+* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)]
+
+BUG FIXES:
+
+* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)]
+* raft: Consul leaders will attempt to transfer leadership to another server as part of gracefully leaving the cluster. [[GH-11242](https://github.com/hashicorp/consul/issues/11242)]
+* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)]
+* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools
+* ui: Fixed styling of Role delete confirmation button with the Token edit page [[GH-11297](https://github.com/hashicorp/consul/issues/11297)]
+* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)]
+
 ## 1.8.16 (September 27, 2021)

 FEATURES:
@@ -24,6 +24,7 @@ LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
      org.opencontainers.image.title="consul" \
      org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."

+RUN apk add --no-cache dumb-init
# Create a consul user and group first so the IDs get set the same way, even as
# the rest of this may change over time.
RUN addgroup $BIN_NAME && \
@@ -54,7 +55,9 @@ EXPOSE 8500 8600 8600/udp
# Consul doesn't need root privileges so we run it as the consul user from the
# entry point script. The entry point script also uses dumb-init as the top-level
# process to reap any zombie processes created by Consul sub-processes.

COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]

# By default you'll get an insecure single-node development server that stores
GNUmakefile (19 changed lines)

@@ -3,12 +3,13 @@ GOGOVERSION?=$(shell grep github.com/gogo/protobuf go.mod | awk '{print $$2}')
GOTOOLS = \
	github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \
	github.com/hashicorp/go-bindata/go-bindata@master \
-	golang.org/x/tools/cmd/cover \
+	golang.org/x/tools/cmd/cover@master \
-	golang.org/x/tools/cmd/stringer \
+	golang.org/x/tools/cmd/stringer@master \
	github.com/gogo/protobuf/protoc-gen-gofast@$(GOGOVERSION) \
-	github.com/hashicorp/protoc-gen-go-binary \
+	github.com/hashicorp/protoc-gen-go-binary@master \
-	github.com/vektra/mockery/cmd/mockery \
+	github.com/vektra/mockery/cmd/mockery@master \
-	github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1
+	github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \
+	github.com/hashicorp/lint-consul-retry@master

GOTAGS ?=
GOOS?=$(shell go env GOOS)
@@ -283,12 +284,10 @@ static-assets:
ui: ui-docker static-assets-docker

tools:
-	@mkdir -p .gotools
+	@if [[ -d .gotools ]]; then rm -rf .gotools ; fi
-	@cd .gotools && for TOOL in $(GOTOOLS); do \
+	@for TOOL in $(GOTOOLS); do \
		echo "=== TOOL: $$TOOL" ; \
-		rm -f go.mod go.sum ; \
-		go mod init consul-tools ; \
-		go get -v $$TOOL ; \
+		go install -v $$TOOL ; \
	done

version:
@@ -16,10 +16,10 @@ type Config struct {

type ExportFetcher interface {
	// ExportsForPartition returns the config entry defining exports for a partition
-	ExportsForPartition(partition string) PartitionExports
+	ExportsForPartition(partition string) ExportedServices
}

-type PartitionExports struct {
+type ExportedServices struct {
	Data map[string]map[string][]string
}

@@ -1,3 +1,4 @@
+//go:build !consulent
// +build !consulent

package acl

@@ -1,3 +1,4 @@
+//go:build !consulent
// +build !consulent

package acl

@@ -1,3 +1,4 @@
+//go:build !consulent
// +build !consulent

package acl

@@ -1,3 +1,4 @@
+//go:build !consulent
// +build !consulent

package acl

@@ -1,3 +1,4 @@
+//go:build !consulent
// +build !consulent

package acl
agent/acl.go (20 changed lines)

@@ -7,6 +7,8 @@ import (

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/types"
)

// aclAccessorID is used to convert an ACLToken's secretID to its accessorID for non-
@@ -167,24 +169,24 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error {
	return nil
}

-func (a *Agent) filterServicesWithAuthorizer(authz acl.Authorizer, services *map[structs.ServiceID]*structs.NodeService) error {
+func (a *Agent) filterServicesWithAuthorizer(authz acl.Authorizer, services map[string]*api.AgentService) error {
	var authzContext acl.AuthorizerContext
	// Filter out services based on the service policy.
-	for id, service := range *services {
+	for id, service := range services {
-		service.FillAuthzContext(&authzContext)
+		agentServiceFillAuthzContext(service, &authzContext)
		if authz.ServiceRead(service.Service, &authzContext) == acl.Allow {
			continue
		}
-		a.logger.Debug("dropping service from result due to ACLs", "service", id.String())
+		a.logger.Debug("dropping service from result due to ACLs", "service", id)
-		delete(*services, id)
+		delete(services, id)
	}
	return nil
}

-func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[structs.CheckID]*structs.HealthCheck) error {
+func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks map[types.CheckID]*structs.HealthCheck) error {
	var authzContext acl.AuthorizerContext
	// Filter out checks based on the node or service policy.
-	for id, check := range *checks {
+	for id, check := range checks {
		check.FillAuthzContext(&authzContext)
		if len(check.ServiceName) > 0 {
			if authz.ServiceRead(check.ServiceName, &authzContext) == acl.Allow {
@@ -195,8 +197,8 @@ func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[str
				continue
			}
		}
-		a.logger.Debug("dropping check from result due to ACLs", "check", id.String())
+		a.logger.Debug("dropping check from result due to ACLs", "check", id)
-		delete(*checks, id)
+		delete(checks, id)
	}
	return nil
}
@@ -18,7 +18,6 @@ import (
 "github.com/hashicorp/consul/agent/consul/authmethod/testauth"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest"
-"github.com/hashicorp/consul/sdk/freeport"
 "github.com/hashicorp/consul/sdk/testutil"
 "github.com/hashicorp/consul/testrpc"
 )
@@ -92,9 +91,14 @@ func TestACL_Bootstrap(t *testing.T) {
 }

 t.Parallel()
-a := NewTestAgent(t, TestACLConfig()+`
-acl_master_token = ""
-`)
+a := NewTestAgent(t, `
+primary_datacenter = "dc1"
+acl {
+enabled = true
+default_policy = "deny"
+}
+`)
 defer a.Shutdown()

 tests := []struct {
@@ -882,7 +886,7 @@ func TestACL_HTTP(t *testing.T) {
 require.True(t, ok)
 require.Len(t, tokens, 1)
 token := tokens[0]
-require.Equal(t, "Master Token", token.Description)
+require.Equal(t, "Initial Management Token", token.Description)
 require.Len(t, token.Policies, 1)
 require.Equal(t, structs.ACLPolicyGlobalManagementID, token.Policies[0].ID)
 })
@@ -1658,7 +1662,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) {
 testrpc.WaitForLeader(t, a.RPC, "dc1")

 // spin up a fake oidc server
-oidcServer := startSSOTestServer(t)
+oidcServer := oidcauthtest.Start(t)
 pubKey, privKey := oidcServer.SigningKeys()

 type mConfig = map[string]interface{}
@@ -1690,7 +1694,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) {
 for name, tc := range cases {
 tc := tc
 t.Run(name, func(t *testing.T) {
-method, err := upsertTestCustomizedAuthMethod(a.RPC, TestDefaultMasterToken, "dc1", func(method *structs.ACLAuthMethod) {
+method, err := upsertTestCustomizedAuthMethod(a.RPC, TestDefaultInitialManagementToken, "dc1", func(method *structs.ACLAuthMethod) {
 method.Type = "jwt"
 method.Config = map[string]interface{}{
 "JWTSupportedAlgs": []string{"ES256"},
@@ -1759,7 +1763,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) {
 testutil.RequireErrorContains(t, err, "Permission denied")
 })

-_, err = upsertTestCustomizedBindingRule(a.RPC, TestDefaultMasterToken, "dc1", func(rule *structs.ACLBindingRule) {
+_, err = upsertTestCustomizedBindingRule(a.RPC, TestDefaultInitialManagementToken, "dc1", func(rule *structs.ACLBindingRule) {
 rule.AuthMethod = method.Name
 rule.BindType = structs.BindingRuleBindTypeService
 rule.BindName = "test--${value.name}--${value.primary_org}"
@@ -1799,7 +1803,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) {

 // verify the token was deleted
 req, _ = http.NewRequest("GET", "/v1/acl/token/"+token.AccessorID, nil)
-req.Header.Add("X-Consul-Token", TestDefaultMasterToken)
+req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken)
 resp = httptest.NewRecorder()

 // make the request
@@ -1820,7 +1824,7 @@ func TestACL_Authorize(t *testing.T) {
 a1 := NewTestAgent(t, TestACLConfigWithParams(nil))
 defer a1.Shutdown()

-testrpc.WaitForTestAgent(t, a1.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken))
+testrpc.WaitForTestAgent(t, a1.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

 policyReq := structs.ACLPolicySetRequest{
 Policy: structs.ACLPolicy{
@@ -1828,7 +1832,7 @@ func TestACL_Authorize(t *testing.T) {
 Rules: `acl = "read" operator = "write" service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `,
 },
 Datacenter: "dc1",
-WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
+WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
 }
 var policy structs.ACLPolicy
 require.NoError(t, a1.RPC("ACL.PolicySet", &policyReq, &policy))
@@ -1842,15 +1846,15 @@ func TestACL_Authorize(t *testing.T) {
 },
 },
 Datacenter: "dc1",
-WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
+WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
 }

 var token structs.ACLToken
 require.NoError(t, a1.RPC("ACL.TokenSet", &tokenReq, &token))

 // secondary also needs to setup a replication token to pull tokens and policies
-secondaryParams := DefaulTestACLConfigParams()
-secondaryParams.ReplicationToken = secondaryParams.MasterToken
+secondaryParams := DefaultTestACLConfigParams()
+secondaryParams.ReplicationToken = secondaryParams.InitialManagementToken
 secondaryParams.EnableTokenReplication = true

 a2 := NewTestAgent(t, `datacenter = "dc2" `+TestACLConfigWithParams(secondaryParams))
@@ -1860,7 +1864,7 @@ func TestACL_Authorize(t *testing.T) {
 _, err := a2.JoinWAN([]string{addr})
 require.NoError(t, err)

-testrpc.WaitForTestAgent(t, a2.RPC, "dc2", testrpc.WithToken(TestDefaultMasterToken))
+testrpc.WaitForTestAgent(t, a2.RPC, "dc2", testrpc.WithToken(TestDefaultInitialManagementToken))
 // this actually ensures a few things. First the dcs got connect okay, secondly that the policy we
 // are about ready to use in our local token creation exists in the secondary DC
 testrpc.WaitForACLReplication(t, a2.RPC, "dc2", structs.ACLReplicateTokens, policy.CreateIndex, 1, 0)
@@ -1875,7 +1879,7 @@ func TestACL_Authorize(t *testing.T) {
 Local: true,
 },
 Datacenter: "dc2",
-WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
+WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
 }

 var localToken structs.ACLToken
@@ -2005,7 +2009,7 @@ func TestACL_Authorize(t *testing.T) {
 for _, dc := range []string{"dc1", "dc2"} {
 t.Run(dc, func(t *testing.T) {
 req, _ := http.NewRequest("POST", "/v1/internal/acl/authorize?dc="+dc, jsonBody(request))
-req.Header.Add("X-Consul-Token", TestDefaultMasterToken)
+req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken)
 recorder := httptest.NewRecorder()
 raw, err := a1.srv.ACLAuthorize(recorder, req)
 require.NoError(t, err)
@@ -2151,7 +2155,7 @@ func TestACL_Authorize(t *testing.T) {
 false, // agent:write
 false, // event:read
 false, // event:write
-true, // intention:read
+true, // intentions:read
 false, // intention:write
 false, // key:read
 false, // key:list
@@ -2330,14 +2334,6 @@ func upsertTestCustomizedBindingRule(rpc rpcFn, masterToken string, datacenter s
 return &out, nil
 }

-func startSSOTestServer(t *testing.T) *oidcauthtest.Server {
-ports := freeport.MustTake(1)
-return oidcauthtest.Start(t, oidcauthtest.WithPort(
-ports[0],
-func() { freeport.Return(ports) },
-))
-}

 func TestHTTPHandlers_ACLReplicationStatus(t *testing.T) {
 if testing.Short() {
 t.Skip("too slow for testing.Short")
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent

 package agent
@@ -6,8 +7,13 @@ import (
 "github.com/hashicorp/serf/serf"

 "github.com/hashicorp/consul/acl"
+"github.com/hashicorp/consul/api"
 )

 func serfMemberFillAuthzContext(m *serf.Member, ctx *acl.AuthorizerContext) {
 // no-op
 }

+func agentServiceFillAuthzContext(s *api.AgentService, ctx *acl.AuthorizerContext) {
+// no-op
+}
@@ -15,6 +15,7 @@ import (
 "github.com/hashicorp/consul/agent/consul"
 "github.com/hashicorp/consul/agent/local"
 "github.com/hashicorp/consul/agent/structs"
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/lib"
 "github.com/hashicorp/consul/sdk/testutil"
 "github.com/hashicorp/consul/types"
@@ -464,7 +465,7 @@ func TestACL_filterServicesWithAuthorizer(t *testing.T) {
 t.Parallel()
 a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy, catalogIdent)

-filterServices := func(token string, services *map[structs.ServiceID]*structs.NodeService) error {
+filterServices := func(token string, services map[string]*api.AgentService) error {
 authz, err := a.delegate.ResolveTokenAndDefaultMeta(token, nil, nil)
 if err != nil {
 return err
@@ -473,21 +474,22 @@ func TestACL_filterServicesWithAuthorizer(t *testing.T) {
 return a.filterServicesWithAuthorizer(authz, services)
 }

-services := make(map[structs.ServiceID]*structs.NodeService)
-require.NoError(t, filterServices(nodeROSecret, &services))
+services := make(map[string]*api.AgentService)
+require.NoError(t, filterServices(nodeROSecret, services))

-services[structs.NewServiceID("my-service", nil)] = &structs.NodeService{ID: "my-service", Service: "service"}
-services[structs.NewServiceID("my-other", nil)] = &structs.NodeService{ID: "my-other", Service: "other"}
-require.NoError(t, filterServices(serviceROSecret, &services))
-require.Contains(t, services, structs.NewServiceID("my-service", nil))
-require.NotContains(t, services, structs.NewServiceID("my-other", nil))
+services[structs.NewServiceID("my-service", nil).String()] = &api.AgentService{ID: "my-service", Service: "service"}
+services[structs.NewServiceID("my-other", nil).String()] = &api.AgentService{ID: "my-other", Service: "other"}
+require.NoError(t, filterServices(serviceROSecret, services))
+require.Contains(t, services, structs.NewServiceID("my-service", nil).String())
+require.NotContains(t, services, structs.NewServiceID("my-other", nil).String())
 }

 func TestACL_filterChecksWithAuthorizer(t *testing.T) {
 t.Parallel()
 a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy, catalogIdent)

-filterChecks := func(token string, checks *map[structs.CheckID]*structs.HealthCheck) error {
+filterChecks := func(token string, checks map[types.CheckID]*structs.HealthCheck) error {
 authz, err := a.delegate.ResolveTokenAndDefaultMeta(token, nil, nil)
 if err != nil {
 return err
@@ -496,29 +498,29 @@ func TestACL_filterChecksWithAuthorizer(t *testing.T) {
 return a.filterChecksWithAuthorizer(authz, checks)
 }

-checks := make(map[structs.CheckID]*structs.HealthCheck)
-require.NoError(t, filterChecks(nodeROSecret, &checks))
+checks := make(map[types.CheckID]*structs.HealthCheck)
+require.NoError(t, filterChecks(nodeROSecret, checks))

-checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{}
-checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"}
-checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"}
-require.NoError(t, filterChecks(serviceROSecret, &checks))
-_, ok := checks[structs.NewCheckID("my-node", nil)]
+checks["my-node"] = &structs.HealthCheck{}
+checks["my-service"] = &structs.HealthCheck{ServiceName: "service"}
+checks["my-other"] = &structs.HealthCheck{ServiceName: "other"}
+require.NoError(t, filterChecks(serviceROSecret, checks))
+_, ok := checks["my-node"]
 require.False(t, ok)
-_, ok = checks[structs.NewCheckID("my-service", nil)]
+_, ok = checks["my-service"]
 require.True(t, ok)
-_, ok = checks[structs.NewCheckID("my-other", nil)]
+_, ok = checks["my-other"]
 require.False(t, ok)

-checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{}
-checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"}
-checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"}
-require.NoError(t, filterChecks(nodeROSecret, &checks))
-_, ok = checks[structs.NewCheckID("my-node", nil)]
+checks["my-node"] = &structs.HealthCheck{}
+checks["my-service"] = &structs.HealthCheck{ServiceName: "service"}
+checks["my-other"] = &structs.HealthCheck{ServiceName: "other"}
+require.NoError(t, filterChecks(nodeROSecret, checks))
+_, ok = checks["my-node"]
 require.True(t, ok)
-_, ok = checks[structs.NewCheckID("my-service", nil)]
+_, ok = checks["my-service"]
 require.False(t, ok)
-_, ok = checks[structs.NewCheckID("my-other", nil)]
+_, ok = checks["my-other"]
 require.False(t, ok)
 }
agent/agent.go (156 changed lines)
@@ -144,7 +144,20 @@ type delegate interface {
 // This is limited to segments and partitions that the node is a member of.
 LANMembers(f consul.LANMemberFilter) ([]serf.Member, error)

-// GetLANCoordinate returns the coordinate of the node in the LAN gossip pool.
+// GetLANCoordinate returns the coordinate of the node in the LAN gossip
+// pool.
+//
+// - Clients return a single coordinate for the single gossip pool they are
+// in (default, segment, or partition).
+//
+// - Servers return one coordinate for their canonical gossip pool (i.e.
+// default partition/segment) and one per segment they are also ancillary
+// members of.
+//
+// NOTE: servers do not emit coordinates for partitioned gossip pools they
+// are ancillary members of.
+//
+// NOTE: This assumes coordinates are enabled, so check that before calling.
 GetLANCoordinate() (lib.CoordinateSet, error)

 // JoinLAN is used to have Consul join the inner-DC pool The target address
@@ -1140,8 +1153,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
 if runtimeCfg.RaftTrailingLogs != 0 {
 cfg.RaftConfig.TrailingLogs = uint64(runtimeCfg.RaftTrailingLogs)
 }
-if runtimeCfg.ACLMasterToken != "" {
-cfg.ACLMasterToken = runtimeCfg.ACLMasterToken
+if runtimeCfg.ACLInitialManagementToken != "" {
+cfg.ACLInitialManagementToken = runtimeCfg.ACLInitialManagementToken
 }
 cfg.ACLTokenReplication = runtimeCfg.ACLTokenReplication
 cfg.ACLsEnabled = runtimeCfg.ACLsEnabled
@@ -1250,6 +1263,7 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
 }

 cfg.ConfigEntryBootstrap = runtimeCfg.ConfigEntryBootstrap
+cfg.RaftBoltDBConfig = runtimeCfg.RaftBoltDBConfig

 // Duplicate our own serf config once to make sure that the duplication
 // function does not drift.
@@ -1264,6 +1278,7 @@ func segmentConfig(config *config.RuntimeConfig) ([]consul.NetworkSegment, error
 var segments []consul.NetworkSegment

 for _, s := range config.Segments {
+// TODO: use consul.CloneSerfLANConfig(config.SerfLANConfig) here?
 serfConf := consul.DefaultConfig().SerfLANConfig

 serfConf.MemberlistConfig.BindAddr = s.Bind.IP.String()
@@ -1539,13 +1554,10 @@ func (a *Agent) RefreshPrimaryGatewayFallbackAddresses(addrs []string) error {
 }

 // ForceLeave is used to remove a failed node from the cluster
-func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseMeta) (err error) {
+func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseMeta) error {
 a.logger.Info("Force leaving node", "node", node)
-// TODO(partitions): merge IsMember into the RemoveFailedNode call.
-if ok := a.IsMember(node); !ok {
-return fmt.Errorf("agent: No node found with name '%s'", node)
-}
-err = a.delegate.RemoveFailedNode(node, prune, entMeta)
+err := a.delegate.RemoveFailedNode(node, prune, entMeta)
 if err != nil {
 a.logger.Warn("Failed to remove node",
 "node", node,
@@ -1555,6 +1567,25 @@ func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseM
 return err
 }

+// ForceLeaveWAN is used to remove a failed node from the WAN cluster
+func (a *Agent) ForceLeaveWAN(node string, prune bool, entMeta *structs.EnterpriseMeta) error {
+a.logger.Info("(WAN) Force leaving node", "node", node)
+
+srv, ok := a.delegate.(*consul.Server)
+if !ok {
+return fmt.Errorf("Must be a server to force-leave a node from the WAN cluster")
+}
+
+err := srv.RemoveFailedNodeWAN(node, prune, entMeta)
+if err != nil {
+a.logger.Warn("(WAN) Failed to remove node",
+"node", node,
+"error", err,
+)
+}
+return err
+}
+
 // AgentLocalMember is used to retrieve the LAN member for the local node.
 func (a *Agent) AgentLocalMember() serf.Member {
 return a.delegate.AgentLocalMember()
@@ -1585,18 +1616,6 @@ func (a *Agent) WANMembers() []serf.Member {
 return nil
 }

-// IsMember is used to check if a node with the given nodeName
-// is a member
-func (a *Agent) IsMember(nodeName string) bool {
-for _, m := range a.LANMembersInAgentPartition() {
-if m.Name == nodeName {
-return true
-}
-}
-
-return false
-}

 // StartSync is called once Services and Checks are registered.
 // This is called to prevent a race between clients and the anti-entropy routines
 func (a *Agent) StartSync() {
@@ -1922,7 +1941,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se
 file := filepath.Join(configDir, fi.Name())
 buf, err := ioutil.ReadFile(file)
 if err != nil {
-return nil, fmt.Errorf("failed reading service config file %q: %s", file, err)
+return nil, fmt.Errorf("failed reading service config file %q: %w", file, err)
 }

 // Try decoding the service config definition
@@ -1941,10 +1960,28 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se
 newPath := a.makeServiceConfigFilePath(serviceID)
 if file != newPath {
 if err := os.Rename(file, newPath); err != nil {
-a.logger.Error("Failed renaming service config file from %s to %s", file, newPath, err)
+a.logger.Error("Failed renaming service config file",
+"file", file,
+"targetFile", newPath,
+"error", err,
+)
 }
 }

+if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.PartitionOrDefault()) {
+a.logger.Info("Purging service config file in wrong partition",
+"file", file,
+"partition", p.PartitionOrDefault(),
+)
+if err := os.Remove(file); err != nil {
+a.logger.Error("Failed purging service config file",
+"file", file,
+"error", err,
+)
+}
+continue
+}
+
 out[serviceID] = p.Defaults
 }
@@ -3045,14 +3082,18 @@ func (a *Agent) loadCheckState(check *structs.HealthCheck) error {
 if os.IsNotExist(err) {
 return nil
 } else {
-return fmt.Errorf("failed reading file %q: %s", file, err)
+return fmt.Errorf("failed reading check state %q: %w", file, err)
 }
 }
 if err := os.Rename(oldFile, file); err != nil {
-a.logger.Error("Failed renaming service file from %s to %s", oldFile, file, err)
+a.logger.Error("Failed renaming check state",
+"file", oldFile,
+"targetFile", file,
+"error", err,
+)
 }
 } else {
-return fmt.Errorf("failed reading file %q: %s", file, err)
+return fmt.Errorf("failed reading file %q: %w", file, err)
 }
 }
|
@ -3242,7 +3283,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("Failed reading services dir %q: %s", svcDir, err)
|
return fmt.Errorf("Failed reading services dir %q: %w", svcDir, err)
|
||||||
}
|
}
|
||||||
for _, fi := range files {
|
for _, fi := range files {
|
||||||
// Skip all dirs
|
// Skip all dirs
|
||||||
|
@ -3260,7 +3301,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
file := filepath.Join(svcDir, fi.Name())
|
file := filepath.Join(svcDir, fi.Name())
|
||||||
buf, err := ioutil.ReadFile(file)
|
buf, err := ioutil.ReadFile(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed reading service file %q: %s", file, err)
|
return fmt.Errorf("failed reading service file %q: %w", file, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try decoding the service definition
|
// Try decoding the service definition
|
||||||
|
@ -3280,10 +3321,28 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
newPath := a.makeServiceFilePath(p.Service.CompoundServiceID())
|
newPath := a.makeServiceFilePath(p.Service.CompoundServiceID())
|
||||||
if file != newPath {
|
if file != newPath {
|
||||||
if err := os.Rename(file, newPath); err != nil {
|
if err := os.Rename(file, newPath); err != nil {
|
||||||
a.logger.Error("Failed renaming service file from %s to %s", file, newPath, err)
|
a.logger.Error("Failed renaming service file",
|
||||||
|
"file", file,
|
||||||
|
"targetFile", newPath,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Service.PartitionOrDefault()) {
|
||||||
|
a.logger.Info("Purging service file in wrong partition",
|
||||||
|
"file", file,
|
||||||
|
"partition", p.Service.EnterpriseMeta.PartitionOrDefault(),
|
||||||
|
)
|
||||||
|
if err := os.Remove(file); err != nil {
|
||||||
|
a.logger.Error("Failed purging service file",
|
||||||
|
"file", file,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
// Restore LocallyRegisteredAsSidecar, see persistedService.LocallyRegisteredAsSidecar
|
// Restore LocallyRegisteredAsSidecar, see persistedService.LocallyRegisteredAsSidecar
|
||||||
p.Service.LocallyRegisteredAsSidecar = p.LocallyRegisteredAsSidecar
|
p.Service.LocallyRegisteredAsSidecar = p.LocallyRegisteredAsSidecar
|
||||||
|
|
||||||
|
@ -3296,10 +3355,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
"source", p.Source,
|
"source", p.Source,
|
||||||
)
|
)
|
||||||
if err := a.purgeService(serviceID); err != nil {
|
if err := a.purgeService(serviceID); err != nil {
|
||||||
return fmt.Errorf("failed purging service %q: %s", serviceID, err)
|
return fmt.Errorf("failed purging service %q: %w", serviceID, err)
|
||||||
}
|
}
|
||||||
if err := a.purgeServiceConfig(serviceID); err != nil {
|
if err := a.purgeServiceConfig(serviceID); err != nil {
|
||||||
return fmt.Errorf("failed purging service config %q: %s", serviceID, err)
|
return fmt.Errorf("failed purging service config %q: %w", serviceID, err)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -3312,10 +3371,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
"file", file,
|
"file", file,
|
||||||
)
|
)
|
||||||
if err := a.purgeService(serviceID); err != nil {
|
if err := a.purgeService(serviceID); err != nil {
|
||||||
return fmt.Errorf("failed purging service %q: %s", serviceID.String(), err)
|
return fmt.Errorf("failed purging service %q: %w", serviceID.String(), err)
|
||||||
}
|
}
|
||||||
if err := a.purgeServiceConfig(serviceID); err != nil {
|
if err := a.purgeServiceConfig(serviceID); err != nil {
|
||||||
return fmt.Errorf("failed purging service config %q: %s", serviceID.String(), err)
|
return fmt.Errorf("failed purging service config %q: %w", serviceID.String(), err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
a.logger.Debug("restored service definition from file",
|
a.logger.Debug("restored service definition from file",
|
||||||
|
@ -3336,7 +3395,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
checkStateSnapshot: snap,
|
checkStateSnapshot: snap,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed adding service %q: %s", serviceID, err)
|
return fmt.Errorf("failed adding service %q: %w", serviceID, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3345,7 +3404,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
|
||||||
if a.State.Service(serviceID) == nil {
|
if a.State.Service(serviceID) == nil {
|
||||||
// This can be cleaned up now.
|
// This can be cleaned up now.
|
||||||
if err := a.purgeServiceConfig(serviceID); err != nil {
|
if err := a.purgeServiceConfig(serviceID); err != nil {
|
||||||
return fmt.Errorf("failed purging service config %q: %s", serviceID, err)
|
return fmt.Errorf("failed purging service config %q: %w", serviceID, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3388,7 +3447,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("Failed reading checks dir %q: %s", checkDir, err)
|
return fmt.Errorf("Failed reading checks dir %q: %w", checkDir, err)
|
||||||
}
|
}
|
||||||
for _, fi := range files {
|
for _, fi := range files {
|
||||||
// Ignore dirs - we only care about the check definition files
|
// Ignore dirs - we only care about the check definition files
|
||||||
|
@ -3400,7 +3459,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
|
||||||
file := filepath.Join(checkDir, fi.Name())
|
file := filepath.Join(checkDir, fi.Name())
|
||||||
buf, err := ioutil.ReadFile(file)
|
buf, err := ioutil.ReadFile(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed reading check file %q: %s", file, err)
|
return fmt.Errorf("failed reading check file %q: %w", file, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode the check
|
// Decode the check
|
||||||
|
@ -3418,10 +3477,25 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
|
||||||
newPath := filepath.Join(a.config.DataDir, checksDir, checkID.StringHashSHA256())
|
newPath := filepath.Join(a.config.DataDir, checksDir, checkID.StringHashSHA256())
|
||||||
if file != newPath {
|
if file != newPath {
|
||||||
if err := os.Rename(file, newPath); err != nil {
|
if err := os.Rename(file, newPath); err != nil {
|
||||||
a.logger.Error("Failed renaming service file from %s to %s", file, newPath, err)
|
a.logger.Error("Failed renaming check file",
|
||||||
|
"file", file,
|
||||||
|
"targetFile", newPath,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Check.PartitionOrDefault()) {
|
||||||
|
a.logger.Info("Purging check file in wrong partition",
|
||||||
|
"file", file,
|
||||||
|
"partition", p.Check.PartitionOrDefault(),
|
||||||
|
)
|
||||||
|
if err := os.Remove(file); err != nil {
|
||||||
|
return fmt.Errorf("failed purging check %q: %w", checkID, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
source, ok := ConfigSourceFromName(p.Source)
|
source, ok := ConfigSourceFromName(p.Source)
|
||||||
if !ok {
|
if !ok {
|
||||||
a.logger.Warn("check exists with invalid source, purging",
|
a.logger.Warn("check exists with invalid source, purging",
|
||||||
|
@ -3429,7 +3503,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
|
||||||
"source", p.Source,
|
"source", p.Source,
|
||||||
)
|
)
|
||||||
if err := a.purgeCheck(checkID); err != nil {
|
if err := a.purgeCheck(checkID); err != nil {
|
||||||
return fmt.Errorf("failed purging check %q: %s", checkID, err)
|
return fmt.Errorf("failed purging check %q: %w", checkID, err)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -3442,7 +3516,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
|
||||||
"file", file,
|
"file", file,
|
||||||
)
|
)
|
||||||
if err := a.purgeCheck(checkID); err != nil {
|
if err := a.purgeCheck(checkID); err != nil {
|
||||||
return fmt.Errorf("Failed purging check %q: %s", checkID, err)
|
return fmt.Errorf("Failed purging check %q: %w", checkID, err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Default check to critical to avoid placing potentially unhealthy
|
// Default check to critical to avoid placing potentially unhealthy
|
||||||
|
@ -3462,7 +3536,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
|
||||||
"error", err,
|
"error", err,
|
||||||
)
|
)
|
||||||
if err := a.purgeCheck(checkID); err != nil {
|
if err := a.purgeCheck(checkID); err != nil {
|
||||||
return fmt.Errorf("Failed purging check %q: %s", checkID, err)
|
return fmt.Errorf("Failed purging check %q: %w", checkID, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.logger.Debug("restored health check from file",
|
a.logger.Debug("restored health check from file",
|
||||||
|
|
|
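Editor's note on the recurring `%s` → `%w` change in the hunks above: wrapping with `%w` keeps the underlying error available to `errors.Is`/`errors.As`, while `%s` flattens it into plain text. A minimal, self-contained sketch (not Consul code) illustrating the difference:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readServiceFile mirrors the error-wrapping pattern used in the diff above.
func readServiceFile(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// %w wraps err so callers can still inspect the original error.
		return fmt.Errorf("failed reading service file %q: %w", path, err)
	}
	return nil
}

func main() {
	err := readServiceFile("/nonexistent/service.json")
	// With %w the sentinel is still detectable through the wrapper;
	// with %s this would print false.
	fmt.Println(errors.Is(err, fs.ErrNotExist))
}
```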
@@ -327,9 +327,6 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
 // NOTE: we're explicitly fetching things in the requested partition and
 // namespace here.
 services := s.agent.State.Services(&entMeta)
-if err := s.agent.filterServicesWithAuthorizer(authz, &services); err != nil {
-return nil, err
-}

 // Convert into api.AgentService since that includes Connect config but so far
 // NodeService doesn't need to internally. They are otherwise identical since
@@ -337,11 +334,8 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
 // anyway.
 agentSvcs := make(map[string]*api.AgentService)

-dc := s.agent.config.Datacenter
-// Use empty list instead of nil
-for id, s := range services {
-agentService := buildAgentService(s, dc)
+for id, svc := range services {
+agentService := buildAgentService(svc, s.agent.config.Datacenter)
 agentSvcs[id.ID] = &agentService
 }

@@ -350,7 +344,34 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
 return nil, err
 }

-return filter.Execute(agentSvcs)
+raw, err := filter.Execute(agentSvcs)
+if err != nil {
+return nil, err
+}
+agentSvcs = raw.(map[string]*api.AgentService)
+
+// Note: we filter the results with ACLs *after* applying the user-supplied
+// bexpr filter, to ensure total (and the filter-by-acls header we set below)
+// do not include results that would be filtered out even if the user did have
+// permission.
+total := len(agentSvcs)
+if err := s.agent.filterServicesWithAuthorizer(authz, agentSvcs); err != nil {
+return nil, err
+}
+
+// Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is
+// authenticated (to prevent information leaking).
+//
+// This is done automatically for HTTP endpoints that proxy to an RPC endpoint
+// that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for
+// agent-local endpoints.
+//
+// For more information see the comment on: Server.maskResultsFilteredByACLs.
+if token != "" {
+setResultsFilteredByACLs(resp, total != len(agentSvcs))
+}
+
+return agentSvcs, nil
 }

 // GET /v1/agent/service/:service_id
@@ -473,13 +494,8 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request)

 // NOTE(partitions): this works because nodes exist in ONE partition
 checks := s.agent.State.Checks(&entMeta)
-if err := s.agent.filterChecksWithAuthorizer(authz, &checks); err != nil {
-return nil, err
-}

 agentChecks := make(map[types.CheckID]*structs.HealthCheck)

-// Use empty list instead of nil
 for id, c := range checks {
 if c.ServiceTags == nil {
 clone := *c
@@ -490,7 +506,34 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request)
 }
 }

-return filter.Execute(agentChecks)
+raw, err := filter.Execute(agentChecks)
+if err != nil {
+return nil, err
+}
+agentChecks = raw.(map[types.CheckID]*structs.HealthCheck)
+
+// Note: we filter the results with ACLs *after* applying the user-supplied
+// bexpr filter, to ensure total (and the filter-by-acls header we set below)
+// do not include results that would be filtered out even if the user did have
+// permission.
+total := len(agentChecks)
+if err := s.agent.filterChecksWithAuthorizer(authz, agentChecks); err != nil {
+return nil, err
+}
+
+// Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is
+// authenticated (to prevent information leaking).
+//
+// This is done automatically for HTTP endpoints that proxy to an RPC endpoint
+// that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for
+// agent-local endpoints.
+//
+// For more information see the comment on: Server.maskResultsFilteredByACLs.
+if token != "" {
+setResultsFilteredByACLs(resp, total != len(agentChecks))
+}
+
+return agentChecks, nil
 }

 func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
@@ -547,9 +590,24 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request)
 return nil, err
 }
 }

+total := len(members)
 if err := s.agent.filterMembers(token, &members); err != nil {
 return nil, err
 }
+
+// Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is
+// authenticated (to prevent information leaking).
+//
+// This is done automatically for HTTP endpoints that proxy to an RPC endpoint
+// that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for
+// agent-local endpoints.
+//
+// For more information see the comment on: Server.maskResultsFilteredByACLs.
+if token != "" {
+setResultsFilteredByACLs(resp, total != len(members))
+}
+
 return members, nil
 }
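Editor's note: the `X-Consul-Results-Filtered-By-ACLs` header added to these agent-local endpoints lets callers detect that ACL filtering removed entries from a response. A hedged sketch using only the Go standard library; the agent address and token are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8500/v1/agent/members", nil)
	if err != nil {
		panic(err)
	}
	// Placeholder token; the header is only set for authenticated callers.
	req.Header.Set("X-Consul-Token", "<acl-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A value of "true" means some results were removed by ACL filtering.
	fmt.Println(resp.Header.Get("X-Consul-Results-Filtered-By-ACLs"))
}
```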
@@ -640,8 +698,15 @@ func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Reque
 // Check the value of the prune query
 _, prune := req.URL.Query()["prune"]

+// Check if the WAN is being queried
+_, wan := req.URL.Query()["wan"]
+
 addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/force-leave/")
-return nil, s.agent.ForceLeave(addr, prune, entMeta)
+if wan {
+return nil, s.agent.ForceLeaveWAN(addr, prune, entMeta)
+} else {
+return nil, s.agent.ForceLeave(addr, prune, entMeta)
+}
 }

 // syncChanges is a helper function which wraps a blocking call to sync
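Editor's note: like `prune`, the new `wan` query parameter is presence-based. A hedged sketch of calling the updated force-leave endpoint from Go; the node name, agent address, and token are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// PUT /v1/agent/force-leave/<node>?prune&wan targets the WAN pool;
	// omit "wan" to target the LAN pool as before.
	url := "http://127.0.0.1:8500/v1/agent/force-leave/failed-node?prune&wan"
	req, err := http.NewRequest(http.MethodPut, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Consul-Token", "<acl-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```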
@@ -664,22 +729,16 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
 }

 if err := decodeBody(req.Body, &args); err != nil {
-resp.WriteHeader(http.StatusBadRequest)
-fmt.Fprintf(resp, "Request decode failed: %v", err)
-return nil, nil
+return nil, BadRequestError{fmt.Sprintf("Request decode failed: %v", err)}
 }

 // Verify the check has a name.
 if args.Name == "" {
-resp.WriteHeader(http.StatusBadRequest)
-fmt.Fprint(resp, "Missing check name")
-return nil, nil
+return nil, BadRequestError{"Missing check name"}
 }

 if args.Status != "" && !structs.ValidStatus(args.Status) {
-resp.WriteHeader(http.StatusBadRequest)
-fmt.Fprint(resp, "Bad check status")
-return nil, nil
+return nil, BadRequestError{"Bad check status"}
 }

 authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &args.EnterpriseMeta, nil)
@@ -698,19 +757,20 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
 chkType := args.CheckType()
 err = chkType.Validate()
 if err != nil {
-resp.WriteHeader(http.StatusBadRequest)
-fmt.Fprint(resp, fmt.Errorf("Invalid check: %v", err))
-return nil, nil
+return nil, BadRequestError{fmt.Sprintf("Invalid check: %v", err)}
 }

 // Store the type of check based on the definition
 health.Type = chkType.Type()

 if health.ServiceID != "" {
+cid := health.CompoundServiceID()
 // fixup the service name so that vetCheckRegister requires the right ACLs
-service := s.agent.State.Service(health.CompoundServiceID())
+service := s.agent.State.Service(cid)
 if service != nil {
 health.ServiceName = service.Service
+} else {
+return nil, NotFoundError{fmt.Sprintf("ServiceID %q does not exist", cid.String())}
 }
 }
@@ -746,14 +806,14 @@ func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http.

 checkID.Normalize()

-if err := s.agent.vetCheckUpdateWithAuthorizer(authz, checkID); err != nil {
-return nil, err
-}
-
 if !s.validateRequestPartition(resp, &checkID.EnterpriseMeta) {
 return nil, nil
 }

+if err := s.agent.vetCheckUpdateWithAuthorizer(authz, checkID); err != nil {
+return nil, err
+}
+
 if err := s.agent.RemoveCheck(checkID, true); err != nil {
 return nil, err
 }
@@ -945,7 +1005,7 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
 }
 notFoundReason := fmt.Sprintf("ServiceId %s not found", sid.String())
 if returnTextPlain(req) {
-return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "application/json"}
+return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "text/plain"}
 }
 return &api.AgentServiceChecksInfo{
 AggregatedStatus: api.HealthCritical,
@@ -1205,14 +1265,14 @@ func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *htt

 sid.Normalize()

-if err := s.agent.vetServiceUpdateWithAuthorizer(authz, sid); err != nil {
-return nil, err
-}
-
 if !s.validateRequestPartition(resp, &sid.EnterpriseMeta) {
 return nil, nil
 }

+if err := s.agent.vetServiceUpdateWithAuthorizer(authz, sid); err != nil {
+return nil, err
+}
+
 if err := s.agent.RemoveService(sid); err != nil {
 return nil, err
 }
@@ -1449,8 +1509,8 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) (
 triggerAntiEntropySync = true
 }

-case "acl_agent_master_token", "agent_master":
-s.agent.tokens.UpdateAgentMasterToken(args.Token, token_store.TokenSourceAPI)
+case "acl_agent_master_token", "agent_master", "agent_recovery":
+s.agent.tokens.UpdateAgentRecoveryToken(args.Token, token_store.TokenSourceAPI)

 case "acl_replication_token", "replication":
 s.agent.tokens.UpdateReplicationToken(args.Token, token_store.TokenSourceAPI)
@@ -1499,7 +1559,9 @@ func (s *HTTPHandlers) AgentConnectCARoots(resp http.ResponseWriter, req *http.R
 }

 // AgentConnectCALeafCert returns the certificate bundle for a service
-// instance. This supports blocking queries to update the returned bundle.
+// instance. This endpoint ignores all "Cache-Control" attributes.
+// This supports blocking queries to update the returned bundle.
+// Non-blocking queries will always verify that the cache entry is still valid.
 func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
 // Get the service name. Note that this is the name of the service,
 // not the ID of the service instance.
@@ -1523,6 +1585,14 @@ func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *htt
 args.MaxQueryTime = qOpts.MaxQueryTime
 args.Token = qOpts.Token

+// TODO(ffmmmm): maybe set MustRevalidate in ConnectCALeafRequest (as part of CacheInfo())
+// We don't want non-blocking queries to return expired leaf certs
+// or leaf certs not valid under the current CA. So always revalidate
+// the leaf cert on non-blocking queries (ie when MinQueryIndex == 0)
+if args.MinQueryIndex == 0 {
+args.MustRevalidate = true
+}
+
 if !s.validateRequestPartition(resp, &args.EnterpriseMeta) {
 return nil, nil
 }
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent

 package agent

File diff suppressed because it is too large

@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent

 package agent
@@ -214,10 +214,14 @@ func TestAgent_TokenStore(t *testing.T) {
 t.Parallel()

 a := NewTestAgent(t, `
-acl_token = "user"
-acl_agent_token = "agent"
-acl_agent_master_token = "master"`,
-)
+acl {
+tokens {
+default = "user"
+agent = "agent"
+agent_recovery = "recovery"
+}
+}
+`)
 defer a.Shutdown()

 if got, want := a.tokens.UserToken(), "user"; got != want {
@@ -226,7 +230,7 @@ func TestAgent_TokenStore(t *testing.T) {
 if got, want := a.tokens.AgentToken(), "agent"; got != want {
 t.Fatalf("got %q want %q", got, want)
 }
-if got, want := a.tokens.IsAgentMasterToken("master"), true; got != want {
+if got, want := a.tokens.IsAgentRecoveryToken("recovery"), true; got != want {
 t.Fatalf("got %v want %v", got, want)
 }
 }
@@ -295,10 +299,6 @@ func TestAgent_HTTPMaxHeaderBytes(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-ports, err := freeport.Take(1)
-require.NoError(t, err)
-t.Cleanup(func() { freeport.Return(ports) })
-
 caConfig := tlsutil.Config{}
 tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil))
 require.NoError(t, err)
@@ -312,7 +312,7 @@ func TestAgent_HTTPMaxHeaderBytes(t *testing.T) {
 },
 RuntimeConfig: &config.RuntimeConfig{
 HTTPAddrs: []net.Addr{
-&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
+&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: freeport.GetOne(t)},
 },
 HTTPMaxHeaderBytes: tt.maxHeaderBytes,
 },
@@ -1738,14 +1738,12 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
 a := StartTestAgent(t, TestAgent{HCL: cfg})
 defer a.Shutdown()

-testCtx, testCancel := context.WithCancel(context.Background())
-defer testCancel()
-
-testHTTPServer, returnPort := launchHTTPCheckServer(t, testCtx)
-defer func() {
-testHTTPServer.Close()
-returnPort()
-}()
+handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+w.WriteHeader(http.StatusOK)
+_, _ = w.Write([]byte("OK\n"))
+})
+testHTTPServer := httptest.NewServer(handler)
+t.Cleanup(testHTTPServer.Close)

 registerServicesAndChecks := func(t *testing.T, a *TestAgent) {
 // add one persistent service with a simple check
@@ -1850,29 +1848,6 @@ node_name = "` + a.Config.NodeName + `"
 }
 }

-func launchHTTPCheckServer(t *testing.T, ctx context.Context) (srv *httptest.Server, returnPortsFn func()) {
-ports := freeport.MustTake(1)
-port := ports[0]
-
-addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(port))
-
-var lc net.ListenConfig
-listener, err := lc.Listen(ctx, "tcp", addr)
-require.NoError(t, err)
-
-handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-w.WriteHeader(http.StatusOK)
-_, _ = w.Write([]byte("OK\n"))
-})
-
-srv = &httptest.Server{
-Listener: listener,
-Config: &http.Server{Handler: handler},
-}
-srv.Start()
-return srv, func() { freeport.Return(ports) }
-}

 func TestAgent_AddCheck_Alias(t *testing.T) {
 if testing.Short() {
 t.Skip("too slow for testing.Short")
@ -4708,14 +4683,12 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
||||||
|
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
gwPort := freeport.MustTake(1)
|
port := freeport.GetOne(t)
|
||||||
defer freeport.Return(gwPort)
|
gwAddr := ipaddr.FormatAddressPort("127.0.0.1", port)
|
||||||
gwAddr := ipaddr.FormatAddressPort("127.0.0.1", gwPort[0])
|
|
||||||
|
|
||||||
// Due to some ordering, we'll have to manually configure these ports in
|
// Due to some ordering, we'll have to manually configure these ports in
|
||||||
// advance.
|
// advance.
|
||||||
secondaryRPCPorts := freeport.MustTake(2)
|
secondaryRPCPorts := freeport.GetN(t, 2)
|
||||||
defer freeport.Return(secondaryRPCPorts)
|
|
||||||
|
|
||||||
a1 := StartTestAgent(t, TestAgent{Name: "bob", HCL: `
|
a1 := StartTestAgent(t, TestAgent{Name: "bob", HCL: `
|
||||||
domain = "consul"
|
domain = "consul"
|
||||||
|
@ -4769,7 +4742,7 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
||||||
ID: "mesh-gateway",
|
ID: "mesh-gateway",
|
||||||
Name: "mesh-gateway",
|
Name: "mesh-gateway",
|
||||||
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
||||||
Port: gwPort[0],
|
Port: port,
|
||||||
}
|
}
|
||||||
req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -4883,7 +4856,7 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
||||||
ID: "mesh-gateway",
|
ID: "mesh-gateway",
|
||||||
Name: "mesh-gateway",
|
Name: "mesh-gateway",
|
||||||
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
||||||
Port: gwPort[0],
|
Port: port,
|
||||||
}
|
}
|
||||||
req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -4898,7 +4871,7 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
||||||
ID: "mesh-gateway",
|
ID: "mesh-gateway",
|
||||||
Name: "mesh-gateway",
|
Name: "mesh-gateway",
|
||||||
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
||||||
Port: gwPort[0],
|
Port: port,
|
||||||
}
|
}
|
||||||
req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
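
These hunks swap freeport.MustTake/freeport.Return pairs for freeport.GetOne(t) and freeport.GetN(t, n), which tie the port reservation to the test's lifetime. The sketch below is a simplified stand-in for that style of helper, not Consul's actual freeport package; it only shows why the explicit `defer freeport.Return(...)` calls can disappear:

```go
package example

import (
	"net"
	"testing"
)

// getOne hands back a TCP port that was free a moment ago, in the spirit of
// freeport.GetOne(t). The real sdk/freeport helpers draw from a process-wide
// pool and register the port's return with t.Cleanup, which is why the diff
// can drop the explicit defer/Return bookkeeping.
func getOne(t *testing.T) int {
	t.Helper()
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("reserving port: %v", err)
	}
	port := l.Addr().(*net.TCPAddr).Port
	_ = l.Close()
	return port
}

// getN mirrors freeport.GetN(t, n) by reserving n ports.
func getN(t *testing.T, n int) []int {
	t.Helper()
	ports := make([]int, 0, n)
	for i := 0; i < n; i++ {
		ports = append(ports, getOne(t))
	}
	return ports
}
```
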
@@ -5068,7 +5041,7 @@ func TestAutoConfig_Integration(t *testing.T) {
 	srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig})
 	defer srv.Shutdown()
 
-	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken))
+	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
 
 	// sign a JWT token
 	now := time.Now()
@@ -5115,7 +5088,10 @@ func TestAutoConfig_Integration(t *testing.T) {
 	// when this is successful we managed to get the gossip key and serf addresses to bind to
 	// and then connect. Additionally we would have to have certificates or else the
 	// verify_incoming config on the server would not let it work.
-	testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken))
+	testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
 
+	// spot check that we now have an ACL token
+	require.NotEmpty(t, client.tokens.AgentToken())
+
 	// grab the existing cert
 	cert1 := client.Agent.tlsConfigurator.Cert()
@@ -5126,7 +5102,7 @@ func TestAutoConfig_Integration(t *testing.T) {
 	ca := connect.TestCA(t, nil)
 	req := &structs.CARequest{
 		Datacenter:   "dc1",
-		WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
+		WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
 		Config: &structs.CAConfiguration{
 			Provider: "consul",
 			Config: map[string]interface{}{
@@ -5159,9 +5135,6 @@ func TestAutoConfig_Integration(t *testing.T) {
 		require.NoError(r, err)
 		require.Equal(r, client.Agent.tlsConfigurator.Cert(), &actual)
 	})
-
-	// spot check that we now have an ACL token
-	require.NotEmpty(t, client.tokens.AgentToken())
 }
 
 func TestAgent_AutoEncrypt(t *testing.T) {
@@ -5201,7 +5174,7 @@ func TestAgent_AutoEncrypt(t *testing.T) {
 	srv := StartTestAgent(t, TestAgent{Name: "test-server", HCL: hclConfig})
 	defer srv.Shutdown()
 
-	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken))
+	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
 
 	client := StartTestAgent(t, TestAgent{Name: "test-client", HCL: TestACLConfigWithParams(nil) + `
 		bootstrap = false
@@ -5224,7 +5197,7 @@ func TestAgent_AutoEncrypt(t *testing.T) {
 
 	// when this is successful we managed to get a TLS certificate and are using it for
 	// encrypted RPC connections.
-	testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken))
+	testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
 
 	// now we need to validate that our certificate has the correct CN
 	aeCert := client.tlsConfigurator.Cert()
@@ -5281,10 +5254,7 @@ func TestAgent_ListenHTTP_MultipleAddresses(t *testing.T) {
 		t.Skip("too slow for testing.Short")
 	}
 
-	ports, err := freeport.Take(2)
-	require.NoError(t, err)
-	t.Cleanup(func() { freeport.Return(ports) })
+	ports := freeport.GetN(t, 2)
 
 	caConfig := tlsutil.Config{}
 	tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil))
 	require.NoError(t, err)
@@ -5351,3 +5321,10 @@ func uniqueAddrs(srvs []apiServer) map[string]struct{} {
 	}
 	return result
 }
+
+func runStep(t *testing.T, name string, fn func(t *testing.T)) {
+	t.Helper()
+	if !t.Run(name, fn) {
+		t.FailNow()
+	}
+}
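
The new runStep helper added at the end of this file wraps t.Run so that a failing step aborts the whole test instead of letting later, order-dependent steps run against broken state. A hypothetical usage sketch (the helper is reproduced so the example stands alone):

```go
package example

import "testing"

// runStep is the helper from the hunk above, copied here so the example
// is self-contained.
func runStep(t *testing.T, name string, fn func(t *testing.T)) {
	t.Helper()
	if !t.Run(name, fn) {
		t.FailNow()
	}
}

func TestSomethingInStages(t *testing.T) {
	var created string

	runStep(t, "create the resource", func(t *testing.T) {
		created = "example-id" // stand-in for a real setup call
	})

	// If a step fails, runStep calls t.FailNow, so later order-dependent
	// steps never run against half-initialized state.
	runStep(t, "use the resource", func(t *testing.T) {
		if created != "example-id" {
			t.Fatalf("unexpected id: %q", created)
		}
	})
}
```
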
@@ -279,6 +279,7 @@ func (ac *AutoConfig) getInitialConfigurationOnce(ctx context.Context, csr strin
 		Datacenter: ac.config.Datacenter,
 		Node:       ac.config.NodeName,
 		Segment:    ac.config.SegmentName,
+		Partition:  ac.config.PartitionOrEmpty(),
 		JWT:        token,
 		CSR:        csr,
 	}
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent
 
 package autoconf
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent
 
 package autoconf
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent
 
 package autoconf
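
The `+//go:build` lines that show up across these files (and again further down) are the Go 1.17-style build constraints; gofmt adds the new `//go:build` form while keeping the legacy `// +build` comment so older toolchains still interpret the file the same way. For example, a community-edition-only file carries both forms:

```go
//go:build !consulent
// +build !consulent

package autoconf

// Code in this file is compiled only when the `consulent` build tag is not
// set, i.e. in community-edition builds.
```
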
@@ -26,9 +26,12 @@ func translateConfig(c *pbconfig.Config) config.Config {
 		Datacenter:        stringPtrOrNil(c.Datacenter),
 		PrimaryDatacenter: stringPtrOrNil(c.PrimaryDatacenter),
 		NodeName:          stringPtrOrNil(c.NodeName),
-		// only output the SegmentName in the configuration if its non-empty
+		// only output the SegmentName in the configuration if it's non-empty
 		// this will avoid a warning later when parsing the persisted configuration
 		SegmentName: stringPtrOrNil(c.SegmentName),
+		// only output the Partition in the configuration if it's non-empty
+		// this will avoid a warning later when parsing the persisted configuration
+		Partition: stringPtrOrNil(c.Partition),
 	}
 
 	if a := c.AutoEncrypt; a != nil {
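
Per the comments in this hunk, SegmentName and Partition are only emitted when non-empty so that parsing the persisted configuration later doesn't warn about empty fields. A plausible shape for the stringPtrOrNil helper those lines rely on (a sketch, not necessarily Consul's exact implementation):

```go
// stringPtrOrNil returns a pointer to s, or nil when s is empty, so optional
// fields are omitted from the generated configuration rather than written
// out as empty strings.
func stringPtrOrNil(s string) *string {
	if s == "" {
		return nil
	}
	return &s
}
```
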
@@ -62,9 +65,9 @@ func translateConfig(c *pbconfig.Config) config.Config {
 	}
 
 	result.ACL.Tokens = config.Tokens{
-		Master:                 stringPtrOrNil(t.Master),
+		InitialManagement:      stringPtrOrNil(t.InitialManagement),
+		AgentRecovery:          stringPtrOrNil(t.AgentRecovery),
 		Replication:            stringPtrOrNil(t.Replication),
-		AgentMaster:            stringPtrOrNil(t.AgentMaster),
 		Default:                stringPtrOrNil(t.Default),
 		Agent:                  stringPtrOrNil(t.Agent),
 		ManagedServiceProvider: tokens,
@@ -69,11 +69,11 @@ func TestTranslateConfig(t *testing.T) {
 			EnableTokenPersistence: true,
 			MSPDisableBootstrap:    false,
 			Tokens: &pbconfig.ACLTokens{
-				Master:            "99e7e490-6baf-43fc-9010-78b6aa9a6813",
+				InitialManagement: "99e7e490-6baf-43fc-9010-78b6aa9a6813",
 				Replication:       "51308d40-465c-4ac6-a636-7c0747edec89",
-				AgentMaster:       "e012e1ea-78a2-41cc-bc8b-231a44196f39",
+				AgentRecovery:     "e012e1ea-78a2-41cc-bc8b-231a44196f39",
 				Default:           "8781a3f5-de46-4b45-83e1-c92f4cfd0332",
 				Agent:             "ddb8f1b0-8a99-4032-b601-87926bce244e",
 				ManagedServiceProvider: []*pbconfig.ACLServiceProviderToken{
 					{
 						AccessorID: "23f37987-7b9e-4e5b-acae-dbc9bc137bae",
@@ -129,11 +129,11 @@ func TestTranslateConfig(t *testing.T) {
 			EnableKeyListPolicy:    boolPointer(true),
 			EnableTokenPersistence: boolPointer(true),
 			Tokens: config.Tokens{
-				Master:            stringPointer("99e7e490-6baf-43fc-9010-78b6aa9a6813"),
-				Replication:       stringPointer("51308d40-465c-4ac6-a636-7c0747edec89"),
-				AgentMaster:       stringPointer("e012e1ea-78a2-41cc-bc8b-231a44196f39"),
+				InitialManagement: stringPointer("99e7e490-6baf-43fc-9010-78b6aa9a6813"),
+				AgentRecovery:     stringPointer("e012e1ea-78a2-41cc-bc8b-231a44196f39"),
+				Replication:       stringPointer("51308d40-465c-4ac6-a636-7c0747edec89"),
 				Default:           stringPointer("8781a3f5-de46-4b45-83e1-c92f4cfd0332"),
 				Agent:             stringPointer("ddb8f1b0-8a99-4032-b601-87926bce244e"),
 				ManagedServiceProvider: []config.ServiceProviderToken{
 					{
 						AccessorID: stringPointer("23f37987-7b9e-4e5b-acae-dbc9bc137bae"),
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent
 
 package autoconf
@@ -192,11 +192,12 @@ func (ac *AutoConfig) caRootsRequest() structs.DCSpecificRequest {
 
 func (ac *AutoConfig) leafCertRequest() cachetype.ConnectCALeafRequest {
 	return cachetype.ConnectCALeafRequest{
 		Datacenter: ac.config.Datacenter,
 		Agent:      ac.config.NodeName,
 		DNSSAN:     ac.getDNSSANs(),
 		IPSAN:      ac.getIPSANs(),
 		Token:      ac.acConfig.Tokens.AgentToken(),
+		EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(ac.config.PartitionOrEmpty()),
 	}
 }
 
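
Both this hunk and the getInitialConfigurationOnce change earlier plumb the agent's admin partition into the auto-config requests. In community-edition builds (the `!consulent` files in this diff), partitions don't exist, so the partition accessor yields the empty string and the enterprise meta collapses to the default value. The sketch below is a rough, hypothetical picture of how such a build-tagged stub tends to look; it is an assumption based on how these calls are used here, not a copy of Consul's code:

```go
//go:build !consulent
// +build !consulent

package example

// enterpriseMeta stands in for the community-edition type: with no
// enterprise features compiled in, it carries no partition information.
type enterpriseMeta struct{}

// partitionOrEmpty models the behaviour the diff relies on: in CE builds
// the configured partition is always the empty string.
func partitionOrEmpty() string { return "" }

// nodeEnterpriseMetaInPartition ignores the requested partition name and
// returns the default meta in CE builds.
func nodeEnterpriseMetaInPartition(_ string) *enterpriseMeta {
	return &enterpriseMeta{}
}
```
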
@@ -380,6 +380,25 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache
 		return c.generateNewLeaf(reqReal, lastResultWithNewState())
 	}
 
+	// If we called Fetch() with MustRevalidate then this call came from a non-blocking query.
+	// Any prior CA rotations should've already expired the cert.
+	// All we need to do is check whether the current CA is the one that signed the leaf. If not, generate a new leaf.
+	// This is not a perfect solution (a CA rotation update can be missed), but it should take care of cases like
+	// https://github.com/hashicorp/consul/issues/10871 and https://github.com/hashicorp/consul/issues/9862.
+	// This feels like a hack, so we may want to revisit the caching/fetching logic for this case.
+	if req.CacheInfo().MustRevalidate {
+		roots, err := c.rootsFromCache()
+		if err != nil {
+			return lastResultWithNewState(), err
+		}
+		if activeRootHasKey(roots, state.authorityKeyID) {
+			return lastResultWithNewState(), nil
+		}
+
+		// if we reach here then the current leaf was not signed by the same CAs, just regen
+		return c.generateNewLeaf(reqReal, lastResultWithNewState())
+	}
+
 	// We are about to block and wait for a change or timeout.
 
 	// Make a chan we can be notified of changes to CA roots on. It must be
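
The new MustRevalidate branch short-circuits the blocking machinery for non-blocking queries: it only checks whether the cached leaf was signed by the currently active root and regenerates the leaf if not. A self-contained sketch of that signer check, using the x509 AuthorityKeyId directly; the real code compares the cached state's authorityKeyID against the active root via activeRootHasKey, so the types and names here are illustrative:

```go
package example

import (
	"bytes"
	"crypto/x509"
)

// root is a minimal stand-in for a CA root entry: whether it is the active
// root and the key ID of its signing certificate.
type root struct {
	Active       bool
	SigningKeyID []byte
}

// leafSignedByActiveRoot reports whether the leaf certificate was signed by
// the currently active root, which is the question the MustRevalidate branch
// asks before deciding to regenerate the leaf.
func leafSignedByActiveRoot(leaf *x509.Certificate, roots []root) bool {
	for _, r := range roots {
		if r.Active && bytes.Equal(r.SigningKeyID, leaf.AuthorityKeyId) {
			return true
		}
	}
	return false
}
```
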
@@ -401,7 +420,7 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache
 	c.fetchStart(rootUpdateCh)
 	defer c.fetchDone(rootUpdateCh)
 
-	// Setup the timeout chan outside the loop so we don't keep bumping the timout
+	// Setup the timeout chan outside the loop so we don't keep bumping the timeout
 	// later if we loop around.
 	timeoutCh := time.After(opts.Timeout)
 
@@ -492,7 +511,7 @@ func (c *ConnectCALeaf) rootsFromCache() (*structs.IndexedCARoots, error) {
 
 // generateNewLeaf does the actual work of creating a new private key,
 // generating a CSR and getting it signed by the servers. result argument
-// represents the last result currently in cache if any along with it's state.
+// represents the last result currently in cache if any along with its state.
 func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
 	result cache.FetchResult) (cache.FetchResult, error) {
 
@@ -643,14 +662,15 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
 // since this is only used for cache-related requests and not forwarded
 // directly to any Consul servers.
 type ConnectCALeafRequest struct {
 	Token         string
 	Datacenter    string
 	Service       string // Service name, not ID
 	Agent         string // Agent name, not ID
 	DNSSAN        []string
 	IPSAN         []net.IP
 	MinQueryIndex uint64
 	MaxQueryTime  time.Duration
+	MustRevalidate bool
 
 	structs.EnterpriseMeta
 }
@@ -684,10 +704,11 @@ func (req *ConnectCALeafRequest) TargetPartition() string {
 
 func (r *ConnectCALeafRequest) CacheInfo() cache.RequestInfo {
 	return cache.RequestInfo{
 		Token:      r.Token,
 		Key:        r.Key(),
 		Datacenter: r.Datacenter,
 		MinIndex:   r.MinQueryIndex,
 		Timeout:    r.MaxQueryTime,
+		MustRevalidate: r.MustRevalidate,
 	}
 }
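
Together, the last two hunks add the MustRevalidate flag to the leaf-cert request and copy it into cache.RequestInfo, so the cache layer can tell a non-blocking "re-check this answer" query apart from an ordinary blocking one. A hypothetical caller fragment, assuming the cachetype package from this diff is imported; the field values are made up:

```go
// A non-blocking lookup (no MinQueryIndex) that still wants the cache to
// re-check the leaf against the current CA sets MustRevalidate.
req := cachetype.ConnectCALeafRequest{
	Datacenter:     "dc1",
	Service:        "web",
	Token:          "example-acl-token",
	MustRevalidate: true,
}

info := req.CacheInfo()
// info.MustRevalidate is now true, so Fetch takes the revalidation branch
// added above instead of blocking on CA root changes.
_ = info
```
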
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent
 
 package cachetype
@@ -1,3 +1,4 @@
+//go:build !race
 // +build !race
 
 package cachetype
@@ -1,3 +1,4 @@
+//go:build race
 // +build race
 
 package cachetype
@@ -1,3 +1,4 @@
+//go:build !consulent
 // +build !consulent
 
 package agent