diff --git a/.changelog/11245.txt b/.changelog/11245.txt new file mode 100644 index 000000000..aacdb1def --- /dev/null +++ b/.changelog/11245.txt @@ -0,0 +1,3 @@ +```release-note:bug +connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults. +``` \ No newline at end of file diff --git a/.changelog/11328.txt b/.changelog/11328.txt deleted file mode 100644 index 637a3876e..000000000 --- a/.changelog/11328.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -config: Allow ${} style interpolation for UI Dashboard template URLs -``` diff --git a/.changelog/11371.txt b/.changelog/11371.txt new file mode 100644 index 000000000..eff8f65a6 --- /dev/null +++ b/.changelog/11371.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adding support of Consul API Gateway as an external source. +``` \ No newline at end of file diff --git a/.changelog/11417.txt b/.changelog/11417.txt new file mode 100644 index 000000000..019163462 --- /dev/null +++ b/.changelog/11417.txt @@ -0,0 +1,9 @@ +```release-note:improvement +ci: Artifact builds will now only run on merges to the release branches or to `main` +``` +```release-note:improvement +ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 +``` +```release-note:improvement +ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] +``` diff --git a/.changelog/11461.txt b/.changelog/11461.txt new file mode 100644 index 000000000..76893d897 --- /dev/null +++ b/.changelog/11461.txt @@ -0,0 +1,3 @@ +```release-note:improvement +config: warn the user if client_addr is empty because client services won't be listening +``` \ No newline at end of file diff --git a/.changelog/11472.txt b/.changelog/11472.txt new file mode 100644 index 000000000..24624d633 --- /dev/null +++ b/.changelog/11472.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: **(Enterprise only)** When no namespace is 
selected, make sure to default to the tokens default namespace when requesting permissions +``` diff --git a/.changelog/11474.txt b/.changelog/11474.txt new file mode 100644 index 000000000..486db5630 --- /dev/null +++ b/.changelog/11474.txt @@ -0,0 +1,6 @@ +```release-note:bug +ui: code editor styling (layout consistency + wide screen support) +``` +```release-note:improvement +ui: added copy to clipboard button in code editor toolbars +``` diff --git a/.changelog/11475.txt b/.changelog/11475.txt new file mode 100644 index 000000000..ec28fc2e5 --- /dev/null +++ b/.changelog/11475.txt @@ -0,0 +1,4 @@ +```release-note:bug +ui: Filter the global intentions list by the currently selected parition rather +than a wildcard +``` diff --git a/.changelog/11479.txt b/.changelog/11479.txt new file mode 100644 index 000000000..3da6e7feb --- /dev/null +++ b/.changelog/11479.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: When switching partitions reset the namespace back to the tokens default namespace or default +``` diff --git a/.changelog/11480.txt b/.changelog/11480.txt new file mode 100644 index 000000000..d5badc405 --- /dev/null +++ b/.changelog/11480.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add support for iptable rules that allow DNS lookup redirection to Consul DNS. +``` \ No newline at end of file diff --git a/.changelog/_1253.txt b/.changelog/11491.txt similarity index 100% rename from .changelog/_1253.txt rename to .changelog/11491.txt diff --git a/.changelog/11505.txt b/.changelog/11505.txt new file mode 100644 index 000000000..4f8ca86bf --- /dev/null +++ b/.changelog/11505.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update UI browser support to 'roughly ~2 years back' +``` diff --git a/.changelog/11514.txt b/.changelog/11514.txt new file mode 100644 index 000000000..d869f46ab --- /dev/null +++ b/.changelog/11514.txt @@ -0,0 +1,6 @@ +```release-note:improvement +connect/ca: Return an error when querying roots from uninitialized CA. 
+``` +```release-note:bug +connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers. +``` \ No newline at end of file diff --git a/.changelog/11520.txt b/.changelog/11520.txt new file mode 100644 index 000000000..d8532bc6c --- /dev/null +++ b/.changelog/11520.txt @@ -0,0 +1,4 @@ +```release-note:bug +ui: Revert to depending on the backend, 'post-user-action', to report +permissions errors rather than using UI capabilities 'pre-user-action' +``` diff --git a/.changelog/11522.txt b/.changelog/11522.txt new file mode 100644 index 000000000..dc03a5e40 --- /dev/null +++ b/.changelog/11522.txt @@ -0,0 +1,3 @@ +```release-note:bug +xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection +``` diff --git a/.changelog/11541.txt b/.changelog/11541.txt new file mode 100644 index 000000000..6a8617dba --- /dev/null +++ b/.changelog/11541.txt @@ -0,0 +1,3 @@ +```release-note:improvement +partitions: Prevent writing partition-exports entries to secondary DCs. 
+``` \ No newline at end of file diff --git a/.changelog/11556.txt b/.changelog/11556.txt new file mode 100644 index 000000000..1b7fd6037 --- /dev/null +++ b/.changelog/11556.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add upstream icons for upstreams and upstream instances +``` diff --git a/.changelog/11566.txt b/.changelog/11566.txt new file mode 100644 index 000000000..460617da5 --- /dev/null +++ b/.changelog/11566.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: **(Enterprise only)** Allow ingress gateways to target services in another partition +``` \ No newline at end of file diff --git a/.changelog/11569.txt b/.changelog/11569.txt new file mode 100644 index 000000000..84657f984 --- /dev/null +++ b/.changelog/11569.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +api: responses that contain only a partial subset of results, due to filtering by ACL policies, may now include an `X-Consul-Results-Filtered-By-ACLs` header +``` diff --git a/.changelog/11573.txt b/.changelog/11573.txt new file mode 100644 index 000000000..de7840d84 --- /dev/null +++ b/.changelog/11573.txt @@ -0,0 +1,5 @@ +```release-note:improvement +connect: Support Vault auth methods for the Connect CA Vault provider. Currently, we support any non-deprecated auth methods +the latest version of Vault supports (v1.8.5), which include AppRole, AliCloud, AWS, Azure, Cloud Foundry, GitHub, Google Cloud, +JWT/OIDC, Kerberos, Kubernetes, LDAP, Oracle Cloud Infrastructure, Okta, Radius, TLS Certificates, and Username & Password. 
+``` \ No newline at end of file diff --git a/.changelog/11577.txt b/.changelog/11577.txt new file mode 100644 index 000000000..14fe7b014 --- /dev/null +++ b/.changelog/11577.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update global notification styling +``` diff --git a/.changelog/11578.txt b/.changelog/11578.txt new file mode 100644 index 000000000..76f66adb9 --- /dev/null +++ b/.changelog/11578.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix inline-code brand styling +``` diff --git a/.changelog/11585.txt b/.changelog/11585.txt new file mode 100644 index 000000000..563a86aa6 --- /dev/null +++ b/.changelog/11585.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: ensure new partition fields are omit empty for compatibility with older versions of consul +``` diff --git a/.changelog/11586.txt b/.changelog/11586.txt new file mode 100644 index 000000000..dd519ba29 --- /dev/null +++ b/.changelog/11586.txt @@ -0,0 +1,6 @@ +```release-note:bug +windows: fixes arm and arm64 builds +``` +```release-note:bug +macos: fixes building with a non-Apple LLVM (such as installed via Homebrew) +``` diff --git a/.changelog/11591.txt b/.changelog/11591.txt new file mode 100644 index 000000000..5efad8024 --- /dev/null +++ b/.changelog/11591.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Ensure the UI stores the default partition for the users token +``` diff --git a/.changelog/11604.txt b/.changelog/11604.txt new file mode 100644 index 000000000..d3b585e73 --- /dev/null +++ b/.changelog/11604.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add partition support for SSO +``` diff --git a/.changelog/11607.txt b/.changelog/11607.txt new file mode 100644 index 000000000..2bec10457 --- /dev/null +++ b/.changelog/11607.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: **(Enterprise only)** purge service/check registration files for incorrect partitions on reload +``` diff --git a/.changelog/11640.txt b/.changelog/11640.txt new file mode 100644 index 000000000..87d491673 
--- /dev/null +++ b/.changelog/11640.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Include `Service.Namespace` into available variables for `dashboard_url_templates` +``` diff --git a/.changelog/11645.txt b/.changelog/11645.txt new file mode 100644 index 000000000..845795327 --- /dev/null +++ b/.changelog/11645.txt @@ -0,0 +1,3 @@ +```release-note:improvement +types: add TLSVersion and TLSCipherSuite +``` diff --git a/.changelog/11654.txt b/.changelog/11654.txt new file mode 100644 index 000000000..6a88f2c5d --- /dev/null +++ b/.changelog/11654.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Include `Service.Partition` into available variables for `dashboard_url_templates` +``` diff --git a/.changelog/11656.txt b/.changelog/11656.txt new file mode 100644 index 000000000..10dc47285 --- /dev/null +++ b/.changelog/11656.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Don't offer a 'Valid Datacenters' option when editing policies for non-default partitions +``` diff --git a/.changelog/11665.txt b/.changelog/11665.txt new file mode 100644 index 000000000..f4812a107 --- /dev/null +++ b/.changelog/11665.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +config: `acl.tokens.master` has been renamed to `acl.tokens.initial_management`, and `acl.tokens.agent_master` has been renamed to `acl.tokens.agent_recovery` - the old field names are now deprecated and will be removed in a future major release +``` diff --git a/.changelog/11666.txt b/.changelog/11666.txt new file mode 100644 index 000000000..65080a068 --- /dev/null +++ b/.changelog/11666.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Upgrade Lock Sessions to use partitions +``` diff --git a/.changelog/11668.txt b/.changelog/11668.txt new file mode 100644 index 000000000..314bbfe57 --- /dev/null +++ b/.changelog/11668.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Add documentation link to Partition empty state +``` diff --git a/.changelog/11669.txt b/.changelog/11669.txt new file mode 100644 index 
000000000..9cd63035e --- /dev/null +++ b/.changelog/11669.txt @@ -0,0 +1,6 @@ +```release-note:deprecation +api: `/v1/agent/token/agent_master` is deprecated and will be removed in a future major release - use `/v1/agent/token/agent_recovery` instead +``` +```release-note:breaking-change +cli: `consul acl set-agent-token master` has been replaced with `consul acl set-agent-token recovery` +``` diff --git a/.changelog/11670.txt b/.changelog/11670.txt new file mode 100644 index 000000000..f8f4fa5b6 --- /dev/null +++ b/.changelog/11670.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix visual issue with slight table header overflow +``` diff --git a/.changelog/11671.txt b/.changelog/11671.txt new file mode 100644 index 000000000..0f97366fb --- /dev/null +++ b/.changelog/11671.txt @@ -0,0 +1,3 @@ +```release-note:bug +ca: fixes a bug that caused the intermediate cert used to sign leaf certs to be missing from the /connect/ca/roots API response when the Vault provider was used. +``` diff --git a/.changelog/11672.txt b/.changelog/11672.txt new file mode 100644 index 000000000..d14f74b16 --- /dev/null +++ b/.changelog/11672.txt @@ -0,0 +1,3 @@ +```release-note:bug +ca: fixes a bug that caused the SigningKeyID to be wrong in the primary DC, when the Vault provider is used, after a CA config creates a new root. +``` diff --git a/.changelog/11679.txt b/.changelog/11679.txt new file mode 100644 index 000000000..daf39eb84 --- /dev/null +++ b/.changelog/11679.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adds support for partitions to the Routing visualization. +``` diff --git a/.changelog/11680.txt b/.changelog/11680.txt new file mode 100644 index 000000000..57617f706 --- /dev/null +++ b/.changelog/11680.txt @@ -0,0 +1,3 @@ +```release-note:improvement +server: block enterprise-specific partition-exports config entry from being used in OSS Consul. 
+``` \ No newline at end of file diff --git a/.changelog/11681.txt b/.changelog/11681.txt new file mode 100644 index 000000000..4d7c7cbae --- /dev/null +++ b/.changelog/11681.txt @@ -0,0 +1,4 @@ +```release-note:bug +ui: Fixes an issue where under some circumstances after logging we present the +data loaded previous to you logging in. +``` diff --git a/.changelog/11693.txt b/.changelog/11693.txt new file mode 100644 index 000000000..1088a24c5 --- /dev/null +++ b/.changelog/11693.txt @@ -0,0 +1,3 @@ +```release-note:bug +ca: fixes a bug that caused non blocking leaf cert queries to return the same cached response regardless of ca rotation or leaf cert expiry +``` \ No newline at end of file diff --git a/.changelog/11696.txt b/.changelog/11696.txt new file mode 100644 index 000000000..5723a5a74 --- /dev/null +++ b/.changelog/11696.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adds support for partitions to Service and Node Identity template visuals. +``` diff --git a/.changelog/11699.txt b/.changelog/11699.txt new file mode 100644 index 000000000..32949238b --- /dev/null +++ b/.changelog/11699.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-config: ensure the feature works properly with partitions +``` diff --git a/.changelog/11702.txt b/.changelog/11702.txt new file mode 100644 index 000000000..bd3b4f239 --- /dev/null +++ b/.changelog/11702.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adds basic support for showing Services exported from another partition. 
+``` diff --git a/.changelog/11720.txt b/.changelog/11720.txt new file mode 100644 index 000000000..9c141b3c7 --- /dev/null +++ b/.changelog/11720.txt @@ -0,0 +1,11 @@ +```release-note:improvement +raft: Use bbolt instead of the legacy boltdb implementation +``` + +```release-note:improvement +raft: Emit boltdb related performance metrics +``` + +```release-note:improvement +raft: Added a configuration to disable boltdb freelist syncing +``` diff --git a/.changelog/11722.txt b/.changelog/11722.txt new file mode 100644 index 000000000..e0fcec47e --- /dev/null +++ b/.changelog/11722.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: add variation of force-leave that exclusively works on the WAN +``` diff --git a/.changelog/11724.txt b/.changelog/11724.txt new file mode 100644 index 000000000..61c22a60f --- /dev/null +++ b/.changelog/11724.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: Consul will now generate a unique virtual IP for each connect-enabled service (this will also differ across namespace/partition in Enterprise). +``` diff --git a/.changelog/11725.txt b/.changelog/11725.txt new file mode 100644 index 000000000..f66bfb91d --- /dev/null +++ b/.changelog/11725.txt @@ -0,0 +1,3 @@ +```release-note:improvement +dns: Added a `virtual` endpoint for querying the assigned virtual IP for a service. +``` diff --git a/.changelog/11737.txt b/.changelog/11737.txt new file mode 100644 index 000000000..4c4addb2a --- /dev/null +++ b/.changelog/11737.txt @@ -0,0 +1,3 @@ +```release-note:improvement +partitions: **(Enterprise only)** rename APIs, commands, and public types to use "partition" rather than "admin partition". +``` \ No newline at end of file diff --git a/.changelog/11738.txt b/.changelog/11738.txt new file mode 100644 index 000000000..6584863e4 --- /dev/null +++ b/.changelog/11738.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: **(Enterprise only)** add support for cross-partition transparent proxying. 
+``` \ No newline at end of file diff --git a/.changelog/11739.txt b/.changelog/11739.txt new file mode 100644 index 000000000..6040dddf7 --- /dev/null +++ b/.changelog/11739.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: **(Enterprise Only)** rename partition-exports config entry to exported-services. +``` \ No newline at end of file diff --git a/.changelog/11744.txt b/.changelog/11744.txt new file mode 100644 index 000000000..2a9d3cafd --- /dev/null +++ b/.changelog/11744.txt @@ -0,0 +1,3 @@ +```release-note:note +Renamed the `agent_master` field to `agent_recovery` in the `acl-tokens.json` file in which tokens are persisted on-disk (when `acl.enable_token_persistence` is enabled) +``` diff --git a/.changelog/11748.txt b/.changelog/11748.txt new file mode 100644 index 000000000..8917ed93f --- /dev/null +++ b/.changelog/11748.txt @@ -0,0 +1,3 @@ +```release-note:bug +areas: **(Enterprise only)** make the gRPC server tracker network area aware +``` diff --git a/.changelog/11757.txt b/.changelog/11757.txt new file mode 100644 index 000000000..897fa1fcc --- /dev/null +++ b/.changelog/11757.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: **(Enterprise only)** add support for targeting partitions in discovery chain routes, splits, and redirects. +``` \ No newline at end of file diff --git a/.changelog/_1202.txt b/.changelog/_1202.txt new file mode 100644 index 000000000..8b6e0da52 --- /dev/null +++ b/.changelog/_1202.txt @@ -0,0 +1,3 @@ +```release-note:feature +partitions: **(Enterprise only)** segment serf LAN gossip between nodes in different partitions +``` diff --git a/.changelog/_1238.txt b/.changelog/_1238.txt new file mode 100644 index 000000000..79cb4b142 --- /dev/null +++ b/.changelog/_1238.txt @@ -0,0 +1,3 @@ +```release-note:security +namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805. 
+``` diff --git a/.changelog/_1321.txt b/.changelog/_1321.txt new file mode 100644 index 000000000..74ae5b23b --- /dev/null +++ b/.changelog/_1321.txt @@ -0,0 +1,3 @@ +```release-note:bug +snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. via config or an environment variable) +``` diff --git a/.changelog/_1328.txt b/.changelog/_1328.txt new file mode 100644 index 000000000..2e931517f --- /dev/null +++ b/.changelog/_1328.txt @@ -0,0 +1,3 @@ +```release-note:bug +snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files +``` diff --git a/.changelog/_1330.txt b/.changelog/_1330.txt new file mode 100644 index 000000000..2092c555f --- /dev/null +++ b/.changelog/_1330.txt @@ -0,0 +1,3 @@ +```release-note:bug +acl: **(Enterprise only)** fix namespace and namespace_prefix policy evaluation when both govern an authz request +``` diff --git a/.changelog/_1342.txt b/.changelog/_1342.txt new file mode 100644 index 000000000..7edca2afc --- /dev/null +++ b/.changelog/_1342.txt @@ -0,0 +1,3 @@ +```release-note:improvement +namespaces: **(Enterprise only)** policy and role defaults can reference policies in any namespace in the same partition by ID +``` diff --git a/.changelog/_1368.txt b/.changelog/_1368.txt new file mode 100644 index 000000000..68e3b396d --- /dev/null +++ b/.changelog/_1368.txt @@ -0,0 +1,3 @@ +```release-note:bug +areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. 
+``` diff --git a/.changelog/_1391.txt b/.changelog/_1391.txt new file mode 100644 index 000000000..f1dbe4909 --- /dev/null +++ b/.changelog/_1391.txt @@ -0,0 +1,3 @@ +```release-note:feature +partitions: **(Enterprise only)** Ensure partitions and serf-based WAN federation are mutually exclusive. +``` diff --git a/.circleci/config.yml b/.circleci/config.yml index c4acbb88f..fcdbea518 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ references: test-results: &TEST_RESULTS_DIR /tmp/test-results cache: - yarn: &YARN_CACHE_KEY consul-ui-v5-{{ checksum "ui/yarn.lock" }} + yarn: &YARN_CACHE_KEY consul-ui-v6-{{ checksum "ui/yarn.lock" }} rubygem: &RUBYGEM_CACHE_KEY static-site-gems-v1-{{ checksum "Gemfile.lock" }} environment: &ENVIRONMENT @@ -113,7 +113,7 @@ jobs: - image: *GOLANG_IMAGE steps: - checkout - - run: go get -u github.com/hashicorp/lint-consul-retry && lint-consul-retry + - run: go install github.com/hashicorp/lint-consul-retry@master && lint-consul-retry - run: *notify-slack-failure lint: @@ -184,9 +184,8 @@ jobs: name: Install gogo/protobuf command: | gogo_version=$(go list -m github.com/gogo/protobuf | awk '{print $2}') - mkdir -p .gotools; cd .gotools; go mod init consul-tools - go get -v github.com/hashicorp/protoc-gen-go-binary - go get -v github.com/gogo/protobuf/protoc-gen-gofast@${gogo_version} + go install -v github.com/hashicorp/protoc-gen-go-binary@master + go install -v github.com/gogo/protobuf/protoc-gen-gofast@${gogo_version} - run: command: make --always-make proto diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 0f39f949d..2efc5faff 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,9 +3,9 @@ contact_links: - name: Consul Community Support url: https://discuss.hashicorp.com/c/consul/29 about: If you have a question, or are looking for advice, please post on our Discuss forum! The community loves to chime in to help. Happy Coding! 
- - name: Consul-Helm GitHub Issues - url: https://github.com/hashicorp/consul-helm - about: Are you submitting an issue or feature enhancement for the Consul helm chart? Please post in the Consul-Helm GitHub Issues. + - name: Consul on Kubernetes GitHub Issues + url: https://github.com/hashicorp/consul-k8s + about: Are you submitting an issue or feature enhancement for the Consul Helm chart? Please post in the consul-k8s GitHub Issues. - name: Consul Learn Tracks url: https://learn.hashicorp.com/consul?track=getting-started#getting-started - about: Please check out our Learn Guides. These hands on guides deal with many of the tasks common to using Consul \ No newline at end of file + about: Please check out our Learn Guides. These hands on guides deal with many of the tasks common to using Consul diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8fdc6c8ff..6a0a7fd0f 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,30 +3,55 @@ updates: - package-ecosystem: gomod open-pull-requests-limit: 5 directory: "/" + labels: + - "go" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: gomod open-pull-requests-limit: 5 directory: "/api" + labels: + - "go" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: gomod open-pull-requests-limit: 5 directory: "/sdk" + labels: + - "go" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: npm open-pull-requests-limit: 5 directory: "/ui" + labels: + - "javascript" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: npm open-pull-requests-limit: 5 directory: "/website" + labels: + - "javascript" + - "dependencies" + - "type/docs-cherrypick" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: github-actions open-pull-requests-limit: 5 directory: / + labels: + - "github_actions" + - "dependencies" + - "pr/no-changelog" schedule: - interval: daily 
\ No newline at end of file + interval: daily diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4d404a37f..c7ac47a8f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -3,7 +3,7 @@ name: build on: push: # Sequence of patterns matched against refs/heads - branches: [ crt-release-migration-1.11.x ] + branches: [ main ] env: PKG_NAME: consul diff --git a/.release/ci.hcl b/.release/ci.hcl index b18242104..b248590b8 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -97,8 +97,21 @@ event "sign" { } } -event "verify" { +event "sign-linux-rpms" { depends = ["sign"] + action "sign-linux-rpms" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "sign-linux-rpms" + } + + notification { + on = "fail" + } +} + +event "verify" { + depends = ["sign-linux-rpms"] action "verify" { organization = "hashicorp" repository = "crt-workflows-common" diff --git a/CHANGELOG.md b/CHANGELOG.md index 4da8e90f7..5032e6b73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,141 @@ -## UNRELEASED +## 1.11.0-beta3 (November 17, 2021) + +SECURITY: + +* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)] +* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805. + +FEATURES: + +* ca: Add a configurable TTL for Connect CA root certificates. The configuration is supported by the Vault and Consul providers. [[GH-11428](https://github.com/hashicorp/consul/issues/11428)] +* ca: Add a configurable TTL to the AWS ACM Private CA provider root certificate. 
[[GH-11449](https://github.com/hashicorp/consul/issues/11449)] +* health-checks: add support for h2c in http2 ping health checks [[GH-10690](https://github.com/hashicorp/consul/issues/10690)] +* partitions: **(Enterprise only)** segment serf LAN gossip between nodes in different partitions +* ui: Adding support of Consul API Gateway as an external source. [[GH-11371](https://github.com/hashicorp/consul/issues/11371)] +* ui: Topology - New views for scenarios where no dependencies exist or ACLs are disabled [[GH-11280](https://github.com/hashicorp/consul/issues/11280)] + +IMPROVEMENTS: + +* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* config: warn the user if client_addr is empty because client services won't be listening [[GH-11461](https://github.com/hashicorp/consul/issues/11461)] +* connect/ca: Return an error when querying roots from uninitialized CA. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)] +* connect: **(Enterprise only)** Allow ingress gateways to target services in another partition [[GH-11566](https://github.com/hashicorp/consul/issues/11566)] +* connect: add Namespace configuration setting for Vault CA provider [[GH-11477](https://github.com/hashicorp/consul/issues/11477)] +* namespaces: **(Enterprise only)** policy and role defaults can reference policies in any namespace in the same partition by ID +* partitions: Prevent writing partition-exports entries to secondary DCs. 
[[GH-11541](https://github.com/hashicorp/consul/issues/11541)] +* sdk: Add support for iptable rules that allow DNS lookup redirection to Consul DNS. [[GH-11480](https://github.com/hashicorp/consul/issues/11480)] +* segments: **(Enterprise only)** ensure that the serf_lan_allowed_cidrs applies to network segments [[GH-11495](https://github.com/hashicorp/consul/issues/11495)] +* ui: Add upstream icons for upstreams and upstream instances [[GH-11556](https://github.com/hashicorp/consul/issues/11556)] +* ui: Update UI browser support to 'roughly ~2 years back' [[GH-11505](https://github.com/hashicorp/consul/issues/11505)] +* ui: When switching partitions reset the namespace back to the tokens default namespace or default [[GH-11479](https://github.com/hashicorp/consul/issues/11479)] +* ui: added copy to clipboard button in code editor toolbars [[GH-11474](https://github.com/hashicorp/consul/issues/11474)] BUG FIXES: +* acl: **(Enterprise only)** fix namespace and namespace_prefix policy evaluation when both govern an authz request +* api: ensure new partition fields are omit empty for compatibility with older versions of consul [[GH-11585](https://github.com/hashicorp/consul/issues/11585)] +* connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)] +* connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults. 
[[GH-11245](https://github.com/hashicorp/consul/issues/11245)] +* macos: fixes building with a non-Apple LLVM (such as installed via Homebrew) [[GH-11586](https://github.com/hashicorp/consul/issues/11586)] +* namespaces: **(Enterprise only)** ensure the namespace replicator doesn't replicate deleted namespaces +* partitions: **(Enterprise only)** fix panic when forwarding delete operations to the leader +* snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files +* snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. via config or an environment variable) +* state: **(Enterprise Only)** ensure partition delete triggers namespace deletes +* ui: **(Enterprise only)** When no namespace is selected, make sure to default to the tokens default namespace when requesting permissions [[GH-11472](https://github.com/hashicorp/consul/issues/11472)] +* ui: Ensure the UI stores the default partition for the users token [[GH-11591](https://github.com/hashicorp/consul/issues/11591)] +* ui: Ensure we check intention permissions for specific services when deciding +whether to show action buttons for per service intention actions [[GH-11409](https://github.com/hashicorp/consul/issues/11409)] +* ui: Filter the global intentions list by the currently selected parition rather +than a wildcard [[GH-11475](https://github.com/hashicorp/consul/issues/11475)] +* ui: Revert to depending on the backend, 'post-user-action', to report +permissions errors rather than using UI capabilities 'pre-user-action' [[GH-11520](https://github.com/hashicorp/consul/issues/11520)] +* ui: code editor styling (layout consistency + wide screen support) [[GH-11474](https://github.com/hashicorp/consul/issues/11474)] +* windows: fixes arm and arm64 builds [[GH-11586](https://github.com/hashicorp/consul/issues/11586)] +* xds: fixes a bug where replacing a mesh 
gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)] + +## 1.11.0-beta2 (November 02, 2021) + +BREAKING CHANGES: + +* acl: The legacy ACL system that was deprecated in Consul 1.4.0 has been removed. Before upgrading you should verify that nothing is still using the legacy ACL system. See the [Migrate Legacy ACL Tokens Learn Guide](https://learn.hashicorp.com/tutorials/consul/access-control-token-migration) for more information. [[GH-11232](https://github.com/hashicorp/consul/issues/11232)] + +IMPROVEMENTS: + +* agent: for various /v1/agent endpoints parse the partition parameter on the request [[GH-11444](https://github.com/hashicorp/consul/issues/11444)] +* agent: refactor the agent delegate interface to be partition friendly [[GH-11429](https://github.com/hashicorp/consul/issues/11429)] +* cli: Add `-cas` and `-modify-index` flags to the `consul config delete` command to support Check-And-Set (CAS) deletion of config entries [[GH-11419](https://github.com/hashicorp/consul/issues/11419)] +* cli: update consul members output to display partitions and sort the results usefully [[GH-11446](https://github.com/hashicorp/consul/issues/11446)] +* config: Allow ${} style interpolation for UI Dashboard template URLs [[GH-11328](https://github.com/hashicorp/consul/issues/11328)] +* config: Support Check-And-Set (CAS) deletion of config entries [[GH-11419](https://github.com/hashicorp/consul/issues/11419)] +* connect: **(Enterprise only)** add support for dialing upstreams in remote partitions through mesh gateways. [[GH-11431](https://github.com/hashicorp/consul/issues/11431)] +* connect: **(Enterprise only)** updates ServiceRead and NodeRead to account for the partition-exports config entry. 
[[GH-11433](https://github.com/hashicorp/consul/issues/11433)] +* connect: ingress gateways may now enable built-in TLS for a subset of listeners. [[GH-11163](https://github.com/hashicorp/consul/issues/11163)] +* connect: service-resolver subset filters are validated for valid go-bexpr syntax on write [[GH-11293](https://github.com/hashicorp/consul/issues/11293)] +* connect: update supported envoy versions to 1.20.0, 1.19.1, 1.18.4, 1.17.4 [[GH-11277](https://github.com/hashicorp/consul/issues/11277)] + +DEPRECATIONS: + +* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-11364](https://github.com/hashicorp/consul/issues/11364)] + +BUG FIXES: + +* api: fixed backwards compatibility issue with AgentService SocketPath field. [[GH-11318](https://github.com/hashicorp/consul/issues/11318)] +* dns: Fixed an issue where on DNS requests made with .alt_domain response was returned as .domain [[GH-11348](https://github.com/hashicorp/consul/issues/11348)] +* raft: do not trigger an election if not part of the servers list. [[GH-11375](https://github.com/hashicorp/consul/issues/11375)] +* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)] +* telemetry: fixes a bug with Prometheus consul_autopilot_failure_tolerance metric where 0 is reported instead of NaN on follower servers. 
[[GH-11399](https://github.com/hashicorp/consul/issues/11399)] +* ui: Ensure dc selector correctly shows the currently selected dc [[GH-11380](https://github.com/hashicorp/consul/issues/11380)] +* ui: Ensure we filter tokens by policy when showing which tokens use a certain +policy whilst editing a policy [[GH-11311](https://github.com/hashicorp/consul/issues/11311)] + +## 1.11.0-beta1 (October 15, 2021) + +FEATURES: + +* partitions: allow for partition queries to be forwarded [[GH-11099](https://github.com/hashicorp/consul/issues/11099)] +* sso/oidc: **(Enterprise only)** Add support for providing acr_values in OIDC auth flow [[GH-11026](https://github.com/hashicorp/consul/issues/11026)] +* ui: Added initial support for admin partition CRUD [[GH-11188](https://github.com/hashicorp/consul/issues/11188)] + +IMPROVEMENTS: + +* api: add partition field to acl structs [[GH-11080](https://github.com/hashicorp/consul/issues/11080)] +* audit-logging: **(Enterprise Only)** Audit logs will now include select HTTP headers in each logs payload. Those headers are: `Forwarded`, `Via`, `X-Forwarded-For`, `X-Forwarded-Host` and `X-Forwarded-Proto`. [[GH-11107](https://github.com/hashicorp/consul/issues/11107)] +* connect: Add low-level feature to allow an Ingress to retrieve TLS certificates from SDS. [[GH-10903](https://github.com/hashicorp/consul/issues/10903)] +* connect: update supported envoy versions to 1.19.1, 1.18.4, 1.17.4, 1.16.5 [[GH-11115](https://github.com/hashicorp/consul/issues/11115)] +* state: reads of partitions now accept an optional memdb.WatchSet +* telemetry: Add new metrics for the count of KV entries in the Consul store. [[GH-11090](https://github.com/hashicorp/consul/issues/11090)] +* telemetry: Add new metrics for the count of connect service instances and configuration entries. 
[[GH-11222](https://github.com/hashicorp/consul/issues/11222)] +* ui: Add initial support for partitions to intentions [[GH-11129](https://github.com/hashicorp/consul/issues/11129)] +* ui: Add uri guard to prevent future URL encoding issues [[GH-11117](https://github.com/hashicorp/consul/issues/11117)] +* ui: Move the majority of our SASS variables to use native CSS custom +properties [[GH-11200](https://github.com/hashicorp/consul/issues/11200)] +* ui: Removed informational panel from the namespace selector menu when editing +namespaces [[GH-11130](https://github.com/hashicorp/consul/issues/11130)] + +BUG FIXES: + +* acl: **(Enterprise only)** Fix bug in 'consul members' filtering with partitions. [[GH-11263](https://github.com/hashicorp/consul/issues/11263)] +* acl: **(Enterprise only)** ensure that auth methods with namespace rules work with partitions [[GH-11323](https://github.com/hashicorp/consul/issues/11323)] +* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)] +* connect: Fix upstream listener escape hatch for prepared queries [[GH-11109](https://github.com/hashicorp/consul/issues/11109)] +* grpc: strip local ACL tokens from RPCs during forwarding if crossing datacenters [[GH-11099](https://github.com/hashicorp/consul/issues/11099)] +* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools +* telemetry: Consul Clients no longer emit Autopilot metrics. [[GH-11241](https://github.com/hashicorp/consul/issues/11241)] +* telemetry: fixes a bug with Prometheus consul_autopilot_healthy metric where 0 is reported instead of NaN on servers. 
[[GH-11231](https://github.com/hashicorp/consul/issues/11231)] +* ui: **(Enterprise Only)** Fix saving intentions with namespaced source/destination [[GH-11095](https://github.com/hashicorp/consul/issues/11095)] +* ui: Don't show a CRD warning for read-only intentions [[GH-11149](https://github.com/hashicorp/consul/issues/11149)] +* ui: Ensure all types of data get reconciled with the backend data [[GH-11237](https://github.com/hashicorp/consul/issues/11237)] +* ui: Fixed styling of Role remove dialog on the Token edit page [[GH-11298](https://github.com/hashicorp/consul/issues/11298)] +* ui: Gracefully recover from non-existent DC errors [[GH-11077](https://github.com/hashicorp/consul/issues/11077)] +* ui: Ignore reported permissions for KV area meaning the KV is always enabled +for both read/write access if the HTTP API allows. [[GH-10916](https://github.com/hashicorp/consul/issues/10916)] +* ui: Topology - Fix up Default Allow and Permissive Intentions notices [[GH-11216](https://github.com/hashicorp/consul/issues/11216)] +* ui: hide create button for policies/roles/namespace if users token has no write permissions to those areas [[GH-10914](https://github.com/hashicorp/consul/issues/10914)] +* xds: ensure the active streams counters are 64 bit aligned on 32 bit systems [[GH-11085](https://github.com/hashicorp/consul/issues/11085)] +* xds: fixed a bug where Envoy sidecars could enter a state where they failed to receive xds updates from Consul [[GH-10987](https://github.com/hashicorp/consul/issues/10987)] * Fixing SOA record to return proper domain when alt domain in use. [[GH-10431]](https://github.com/hashicorp/consul/pull/10431) ## 1.11.0-alpha (September 16, 2021) @@ -51,6 +185,44 @@ manage licenses on older servers [[GH-10952](https://github.com/hashicorp/consul * use the MaxQueryTime instead of RPCHoldTimeout for blocking RPC queries [[GH-8978](https://github.com/hashicorp/consul/pull/8978)]. 
[[GH-10299](https://github.com/hashicorp/consul/issues/10299)] +## 1.10.4 (November 11, 2021) + +SECURITY: + +* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)] +* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805. + +IMPROVEMENTS: + +* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* connect/ca: Return an error when querying roots from uninitialized CA. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)] +* telemetry: Add new metrics for the count of connect service instances and configuration entries. [[GH-11222](https://github.com/hashicorp/consul/issues/11222)] + +BUG FIXES: + +* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)] +* api: fixed backwards compatibility issue with AgentService SocketPath field. [[GH-11318](https://github.com/hashicorp/consul/issues/11318)] +* connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)] +* connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults. 
[[GH-11245](https://github.com/hashicorp/consul/issues/11245)] +* raft: do not trigger an election if not part of the servers list. [[GH-11375](https://github.com/hashicorp/consul/issues/11375)] +* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)] +* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools +* snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. via config or an environment variable) +* telemetry: Consul Clients no longer emit Autopilot metrics. [[GH-11241](https://github.com/hashicorp/consul/issues/11241)] +* telemetry: fixes a bug with Prometheus consul_autopilot_failure_tolerance metric where 0 is reported instead of NaN on follower servers. [[GH-11399](https://github.com/hashicorp/consul/issues/11399)] +* telemetry: fixes a bug with Prometheus consul_autopilot_healthy metric where 0 is reported instead of NaN on servers. 
[[GH-11231](https://github.com/hashicorp/consul/issues/11231)] +* ui: **(Enterprise only)** When no namespace is selected, make sure to default to the tokens default namespace when requesting permissions [[GH-11472](https://github.com/hashicorp/consul/issues/11472)] +* ui: Ensure we check intention permissions for specific services when deciding +whether to show action buttons for per service intention actions [[GH-11270](https://github.com/hashicorp/consul/issues/11270)] +* ui: Fixed styling of Role remove dialog on the Token edit page [[GH-11298](https://github.com/hashicorp/consul/issues/11298)] +* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)] + +BUG FIXES: + +* Fixing SOA record to return proper domain when alt domain in use. [[GH-10431]](https://github.com/hashicorp/consul/pull/10431) + ## 1.10.3 (September 27, 2021) FEATURES: @@ -281,6 +453,28 @@ NOTES: * legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive +## 1.9.11 (November 11, 2021) + +SECURITY: + +* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)] +* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805. 
+ +IMPROVEMENTS: + +* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* telemetry: Add new metrics for the count of connect service instances and configuration entries. [[GH-11222](https://github.com/hashicorp/consul/issues/11222)] + +BUG FIXES: + +* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)] +* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)] +* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools +* ui: Fixed styling of Role remove dialog on the Token edit page [[GH-11298](https://github.com/hashicorp/consul/issues/11298)] +* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)] + ## 1.9.10 (September 27, 2021) FEATURES: @@ -639,6 +833,27 @@ BUG FIXES: * telemetry: fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area` [[GH-8685](https://github.com/hashicorp/consul/issues/8685)] * ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)] +## 1.8.17 (November 11, 2021) + +SECURITY: + +* namespaces: **(Enterprise only)** Creating or editing 
namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805. + +IMPROVEMENTS: + +* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] + +BUG FIXES: + +* acl: fixes the fallback behaviour of down_policy with setting extend-cache/async-cache when the token is not cached. [[GH-11136](https://github.com/hashicorp/consul/issues/11136)] +* raft: Consul leaders will attempt to transfer leadership to another server as part of gracefully leaving the cluster. 
[[GH-11242](https://github.com/hashicorp/consul/issues/11242)] +* rpc: only attempt to authorize the DNSName in the client cert when verify_incoming_rpc=true [[GH-11255](https://github.com/hashicorp/consul/issues/11255)] +* server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools +* ui: Fixed styling of Role delete confirmation button with the Token edit page [[GH-11297](https://github.com/hashicorp/consul/issues/11297)] +* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)] + ## 1.8.16 (September 27, 2021) FEATURES: diff --git a/Dockerfile b/Dockerfile index 71bf18811..8d5931e91 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,6 +24,7 @@ LABEL org.opencontainers.image.authors="Consul Team " \ org.opencontainers.image.title="consul" \ org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." +RUN apk add --no-cache dumb-init # Create a consul user and group first so the IDs get set the same way, even as # the rest of this may change over time. RUN addgroup $BIN_NAME && \ @@ -54,7 +55,9 @@ EXPOSE 8500 8600 8600/udp # Consul doesn't need root privileges so we run it as the consul user from the # entry point script. The entry point script also uses dumb-init as the top-level # process to reap any zombie processes created by Consul sub-processes. 
+ COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh +RUN chmod +x /usr/local/bin/docker-entrypoint.sh ENTRYPOINT ["docker-entrypoint.sh"] # By default you'll get an insecure single-node development server that stores diff --git a/GNUmakefile b/GNUmakefile index 90a439f1f..311a47533 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -3,12 +3,13 @@ GOGOVERSION?=$(shell grep github.com/gogo/protobuf go.mod | awk '{print $$2}') GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ github.com/hashicorp/go-bindata/go-bindata@master \ - golang.org/x/tools/cmd/cover \ - golang.org/x/tools/cmd/stringer \ + golang.org/x/tools/cmd/cover@master \ + golang.org/x/tools/cmd/stringer@master \ github.com/gogo/protobuf/protoc-gen-gofast@$(GOGOVERSION) \ - github.com/hashicorp/protoc-gen-go-binary \ - github.com/vektra/mockery/cmd/mockery \ - github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 + github.com/hashicorp/protoc-gen-go-binary@master \ + github.com/vektra/mockery/cmd/mockery@master \ + github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ + github.com/hashicorp/lint-consul-retry@master GOTAGS ?= GOOS?=$(shell go env GOOS) @@ -283,12 +284,10 @@ static-assets: ui: ui-docker static-assets-docker tools: - @mkdir -p .gotools - @cd .gotools && for TOOL in $(GOTOOLS); do \ + @if [[ -d .gotools ]]; then rm -rf .gotools ; fi + @for TOOL in $(GOTOOLS); do \ echo "=== TOOL: $$TOOL" ; \ - rm -f go.mod go.sum ; \ - go mod init consul-tools ; \ - go get -v $$TOOL ; \ + go install -v $$TOOL ; \ done version: diff --git a/acl/acl.go b/acl/acl.go index ff605ade4..d56383d9f 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -16,10 +16,10 @@ type Config struct { type ExportFetcher interface { // ExportsForPartition returns the config entry defining exports for a partition - ExportsForPartition(partition string) PartitionExports + ExportsForPartition(partition string) ExportedServices } -type PartitionExports struct { +type 
ExportedServices struct { Data map[string]map[string][]string } diff --git a/acl/acl_oss.go b/acl/acl_oss.go index df54e3ed0..9c62c5bdb 100644 --- a/acl/acl_oss.go +++ b/acl/acl_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package acl diff --git a/acl/authorizer_oss.go b/acl/authorizer_oss.go index cf33ea268..6870a042d 100644 --- a/acl/authorizer_oss.go +++ b/acl/authorizer_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package acl diff --git a/acl/policy_authorizer_oss.go b/acl/policy_authorizer_oss.go index 0e1c20f07..4829048a8 100644 --- a/acl/policy_authorizer_oss.go +++ b/acl/policy_authorizer_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package acl diff --git a/acl/policy_merger_oss.go b/acl/policy_merger_oss.go index 9b7cb07f8..cff74c732 100644 --- a/acl/policy_merger_oss.go +++ b/acl/policy_merger_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package acl diff --git a/acl/policy_oss.go b/acl/policy_oss.go index 4a4fc84db..a5d6828b6 100644 --- a/acl/policy_oss.go +++ b/acl/policy_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package acl diff --git a/agent/acl.go b/agent/acl.go index fd196c84c..e08b5646a 100644 --- a/agent/acl.go +++ b/agent/acl.go @@ -7,6 +7,8 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/types" ) // aclAccessorID is used to convert an ACLToken's secretID to its accessorID for non- @@ -167,24 +169,24 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error { return nil } -func (a *Agent) filterServicesWithAuthorizer(authz acl.Authorizer, services *map[structs.ServiceID]*structs.NodeService) error { +func (a *Agent) filterServicesWithAuthorizer(authz acl.Authorizer, services map[string]*api.AgentService) error { var authzContext acl.AuthorizerContext // Filter out services based on the service policy. 
- for id, service := range *services { - service.FillAuthzContext(&authzContext) + for id, service := range services { + agentServiceFillAuthzContext(service, &authzContext) if authz.ServiceRead(service.Service, &authzContext) == acl.Allow { continue } - a.logger.Debug("dropping service from result due to ACLs", "service", id.String()) - delete(*services, id) + a.logger.Debug("dropping service from result due to ACLs", "service", id) + delete(services, id) } return nil } -func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[structs.CheckID]*structs.HealthCheck) error { +func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks map[types.CheckID]*structs.HealthCheck) error { var authzContext acl.AuthorizerContext // Filter out checks based on the node or service policy. - for id, check := range *checks { + for id, check := range checks { check.FillAuthzContext(&authzContext) if len(check.ServiceName) > 0 { if authz.ServiceRead(check.ServiceName, &authzContext) == acl.Allow { @@ -195,8 +197,8 @@ func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[str continue } } - a.logger.Debug("dropping check from result due to ACLs", "check", id.String()) - delete(*checks, id) + a.logger.Debug("dropping check from result due to ACLs", "check", id) + delete(checks, id) } return nil } diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 9c149c60e..bd8929aab 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" - "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" ) @@ -92,9 +91,14 @@ func TestACL_Bootstrap(t *testing.T) { } t.Parallel() - a := NewTestAgent(t, TestACLConfig()+` - acl_master_token = "" - `) + a 
:= NewTestAgent(t, ` + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + } + `) defer a.Shutdown() tests := []struct { @@ -882,7 +886,7 @@ func TestACL_HTTP(t *testing.T) { require.True(t, ok) require.Len(t, tokens, 1) token := tokens[0] - require.Equal(t, "Master Token", token.Description) + require.Equal(t, "Initial Management Token", token.Description) require.Len(t, token.Policies, 1) require.Equal(t, structs.ACLPolicyGlobalManagementID, token.Policies[0].ID) }) @@ -1658,7 +1662,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") // spin up a fake oidc server - oidcServer := startSSOTestServer(t) + oidcServer := oidcauthtest.Start(t) pubKey, privKey := oidcServer.SigningKeys() type mConfig = map[string]interface{} @@ -1690,7 +1694,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { for name, tc := range cases { tc := tc t.Run(name, func(t *testing.T) { - method, err := upsertTestCustomizedAuthMethod(a.RPC, TestDefaultMasterToken, "dc1", func(method *structs.ACLAuthMethod) { + method, err := upsertTestCustomizedAuthMethod(a.RPC, TestDefaultInitialManagementToken, "dc1", func(method *structs.ACLAuthMethod) { method.Type = "jwt" method.Config = map[string]interface{}{ "JWTSupportedAlgs": []string{"ES256"}, @@ -1759,7 +1763,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { testutil.RequireErrorContains(t, err, "Permission denied") }) - _, err = upsertTestCustomizedBindingRule(a.RPC, TestDefaultMasterToken, "dc1", func(rule *structs.ACLBindingRule) { + _, err = upsertTestCustomizedBindingRule(a.RPC, TestDefaultInitialManagementToken, "dc1", func(rule *structs.ACLBindingRule) { rule.AuthMethod = method.Name rule.BindType = structs.BindingRuleBindTypeService rule.BindName = "test--${value.name}--${value.primary_org}" @@ -1799,7 +1803,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { // verify the token was deleted req, _ = http.NewRequest("GET", 
"/v1/acl/token/"+token.AccessorID, nil) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp = httptest.NewRecorder() // make the request @@ -1820,7 +1824,7 @@ func TestACL_Authorize(t *testing.T) { a1 := NewTestAgent(t, TestACLConfigWithParams(nil)) defer a1.Shutdown() - testrpc.WaitForTestAgent(t, a1.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, a1.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) policyReq := structs.ACLPolicySetRequest{ Policy: structs.ACLPolicy{ @@ -1828,7 +1832,7 @@ func TestACL_Authorize(t *testing.T) { Rules: `acl = "read" operator = "write" service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `, }, Datacenter: "dc1", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } var policy structs.ACLPolicy require.NoError(t, a1.RPC("ACL.PolicySet", &policyReq, &policy)) @@ -1842,15 +1846,15 @@ func TestACL_Authorize(t *testing.T) { }, }, Datacenter: "dc1", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } var token structs.ACLToken require.NoError(t, a1.RPC("ACL.TokenSet", &tokenReq, &token)) // secondary also needs to setup a replication token to pull tokens and policies - secondaryParams := DefaulTestACLConfigParams() - secondaryParams.ReplicationToken = secondaryParams.MasterToken + secondaryParams := DefaultTestACLConfigParams() + secondaryParams.ReplicationToken = secondaryParams.InitialManagementToken secondaryParams.EnableTokenReplication = true a2 := NewTestAgent(t, `datacenter = "dc2" `+TestACLConfigWithParams(secondaryParams)) @@ -1860,7 +1864,7 @@ func TestACL_Authorize(t *testing.T) { _, err := a2.JoinWAN([]string{addr}) require.NoError(t, err) - 
testrpc.WaitForTestAgent(t, a2.RPC, "dc2", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, a2.RPC, "dc2", testrpc.WithToken(TestDefaultInitialManagementToken)) // this actually ensures a few things. First the dcs got connect okay, secondly that the policy we // are about ready to use in our local token creation exists in the secondary DC testrpc.WaitForACLReplication(t, a2.RPC, "dc2", structs.ACLReplicateTokens, policy.CreateIndex, 1, 0) @@ -1875,7 +1879,7 @@ func TestACL_Authorize(t *testing.T) { Local: true, }, Datacenter: "dc2", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } var localToken structs.ACLToken @@ -2005,7 +2009,7 @@ func TestACL_Authorize(t *testing.T) { for _, dc := range []string{"dc1", "dc2"} { t.Run(dc, func(t *testing.T) { req, _ := http.NewRequest("POST", "/v1/internal/acl/authorize?dc="+dc, jsonBody(request)) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) recorder := httptest.NewRecorder() raw, err := a1.srv.ACLAuthorize(recorder, req) require.NoError(t, err) @@ -2151,7 +2155,7 @@ func TestACL_Authorize(t *testing.T) { false, // agent:write false, // event:read false, // event:write - true, // intention:read + true, // intentions:read false, // intention:write false, // key:read false, // key:list @@ -2330,14 +2334,6 @@ func upsertTestCustomizedBindingRule(rpc rpcFn, masterToken string, datacenter s return &out, nil } -func startSSOTestServer(t *testing.T) *oidcauthtest.Server { - ports := freeport.MustTake(1) - return oidcauthtest.Start(t, oidcauthtest.WithPort( - ports[0], - func() { freeport.Return(ports) }, - )) -} - func TestHTTPHandlers_ACLReplicationStatus(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/acl_oss.go b/agent/acl_oss.go index a04603731..0a50dcda1 100644 --- 
a/agent/acl_oss.go +++ b/agent/acl_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent @@ -6,8 +7,13 @@ import ( "github.com/hashicorp/serf/serf" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/api" ) func serfMemberFillAuthzContext(m *serf.Member, ctx *acl.AuthorizerContext) { // no-op } + +func agentServiceFillAuthzContext(s *api.AgentService, ctx *acl.AuthorizerContext) { + // no-op +} diff --git a/agent/acl_test.go b/agent/acl_test.go index d979fd5bd..c085e49a3 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/types" @@ -464,7 +465,7 @@ func TestACL_filterServicesWithAuthorizer(t *testing.T) { t.Parallel() a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy, catalogIdent) - filterServices := func(token string, services *map[structs.ServiceID]*structs.NodeService) error { + filterServices := func(token string, services map[string]*api.AgentService) error { authz, err := a.delegate.ResolveTokenAndDefaultMeta(token, nil, nil) if err != nil { return err @@ -473,21 +474,22 @@ func TestACL_filterServicesWithAuthorizer(t *testing.T) { return a.filterServicesWithAuthorizer(authz, services) } - services := make(map[structs.ServiceID]*structs.NodeService) - require.NoError(t, filterServices(nodeROSecret, &services)) + services := make(map[string]*api.AgentService) + require.NoError(t, filterServices(nodeROSecret, services)) - services[structs.NewServiceID("my-service", nil)] = &structs.NodeService{ID: "my-service", Service: "service"} - services[structs.NewServiceID("my-other", nil)] = &structs.NodeService{ID: "my-other", Service: "other"} - require.NoError(t, filterServices(serviceROSecret, &services)) - 
require.Contains(t, services, structs.NewServiceID("my-service", nil)) - require.NotContains(t, services, structs.NewServiceID("my-other", nil)) + services[structs.NewServiceID("my-service", nil).String()] = &api.AgentService{ID: "my-service", Service: "service"} + services[structs.NewServiceID("my-other", nil).String()] = &api.AgentService{ID: "my-other", Service: "other"} + require.NoError(t, filterServices(serviceROSecret, services)) + + require.Contains(t, services, structs.NewServiceID("my-service", nil).String()) + require.NotContains(t, services, structs.NewServiceID("my-other", nil).String()) } func TestACL_filterChecksWithAuthorizer(t *testing.T) { t.Parallel() a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy, catalogIdent) - filterChecks := func(token string, checks *map[structs.CheckID]*structs.HealthCheck) error { + filterChecks := func(token string, checks map[types.CheckID]*structs.HealthCheck) error { authz, err := a.delegate.ResolveTokenAndDefaultMeta(token, nil, nil) if err != nil { return err @@ -496,29 +498,29 @@ func TestACL_filterChecksWithAuthorizer(t *testing.T) { return a.filterChecksWithAuthorizer(authz, checks) } - checks := make(map[structs.CheckID]*structs.HealthCheck) - require.NoError(t, filterChecks(nodeROSecret, &checks)) + checks := make(map[types.CheckID]*structs.HealthCheck) + require.NoError(t, filterChecks(nodeROSecret, checks)) - checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{} - checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"} - checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"} - require.NoError(t, filterChecks(serviceROSecret, &checks)) - _, ok := checks[structs.NewCheckID("my-node", nil)] + checks["my-node"] = &structs.HealthCheck{} + checks["my-service"] = &structs.HealthCheck{ServiceName: "service"} + checks["my-other"] = &structs.HealthCheck{ServiceName: "other"} + require.NoError(t, 
filterChecks(serviceROSecret, checks)) + _, ok := checks["my-node"] require.False(t, ok) - _, ok = checks[structs.NewCheckID("my-service", nil)] + _, ok = checks["my-service"] require.True(t, ok) - _, ok = checks[structs.NewCheckID("my-other", nil)] + _, ok = checks["my-other"] require.False(t, ok) - checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{} - checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"} - checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"} - require.NoError(t, filterChecks(nodeROSecret, &checks)) - _, ok = checks[structs.NewCheckID("my-node", nil)] + checks["my-node"] = &structs.HealthCheck{} + checks["my-service"] = &structs.HealthCheck{ServiceName: "service"} + checks["my-other"] = &structs.HealthCheck{ServiceName: "other"} + require.NoError(t, filterChecks(nodeROSecret, checks)) + _, ok = checks["my-node"] require.True(t, ok) - _, ok = checks[structs.NewCheckID("my-service", nil)] + _, ok = checks["my-service"] require.False(t, ok) - _, ok = checks[structs.NewCheckID("my-other", nil)] + _, ok = checks["my-other"] require.False(t, ok) } diff --git a/agent/agent.go b/agent/agent.go index 9e9cf5c21..d4f0397bb 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -144,7 +144,20 @@ type delegate interface { // This is limited to segments and partitions that the node is a member of. LANMembers(f consul.LANMemberFilter) ([]serf.Member, error) - // GetLANCoordinate returns the coordinate of the node in the LAN gossip pool. + // GetLANCoordinate returns the coordinate of the node in the LAN gossip + // pool. + // + // - Clients return a single coordinate for the single gossip pool they are + // in (default, segment, or partition). + // + // - Servers return one coordinate for their canonical gossip pool (i.e. + // default partition/segment) and one per segment they are also ancillary + // members of. 
+ // + // NOTE: servers do not emit coordinates for partitioned gossip pools they + // are ancillary members of. + // + // NOTE: This assumes coordinates are enabled, so check that before calling. GetLANCoordinate() (lib.CoordinateSet, error) // JoinLAN is used to have Consul join the inner-DC pool The target address @@ -1140,8 +1153,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co if runtimeCfg.RaftTrailingLogs != 0 { cfg.RaftConfig.TrailingLogs = uint64(runtimeCfg.RaftTrailingLogs) } - if runtimeCfg.ACLMasterToken != "" { - cfg.ACLMasterToken = runtimeCfg.ACLMasterToken + if runtimeCfg.ACLInitialManagementToken != "" { + cfg.ACLInitialManagementToken = runtimeCfg.ACLInitialManagementToken } cfg.ACLTokenReplication = runtimeCfg.ACLTokenReplication cfg.ACLsEnabled = runtimeCfg.ACLsEnabled @@ -1250,6 +1263,7 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co } cfg.ConfigEntryBootstrap = runtimeCfg.ConfigEntryBootstrap + cfg.RaftBoltDBConfig = runtimeCfg.RaftBoltDBConfig // Duplicate our own serf config once to make sure that the duplication // function does not drift. @@ -1264,6 +1278,7 @@ func segmentConfig(config *config.RuntimeConfig) ([]consul.NetworkSegment, error var segments []consul.NetworkSegment for _, s := range config.Segments { + // TODO: use consul.CloneSerfLANConfig(config.SerfLANConfig) here? serfConf := consul.DefaultConfig().SerfLANConfig serfConf.MemberlistConfig.BindAddr = s.Bind.IP.String() @@ -1539,13 +1554,10 @@ func (a *Agent) RefreshPrimaryGatewayFallbackAddresses(addrs []string) error { } // ForceLeave is used to remove a failed node from the cluster -func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseMeta) (err error) { +func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseMeta) error { a.logger.Info("Force leaving node", "node", node) - // TODO(partitions): merge IsMember into the RemoveFailedNode call. 
- if ok := a.IsMember(node); !ok { - return fmt.Errorf("agent: No node found with name '%s'", node) - } - err = a.delegate.RemoveFailedNode(node, prune, entMeta) + + err := a.delegate.RemoveFailedNode(node, prune, entMeta) if err != nil { a.logger.Warn("Failed to remove node", "node", node, @@ -1555,6 +1567,25 @@ func (a *Agent) ForceLeave(node string, prune bool, entMeta *structs.EnterpriseM return err } +// ForceLeaveWAN is used to remove a failed node from the WAN cluster +func (a *Agent) ForceLeaveWAN(node string, prune bool, entMeta *structs.EnterpriseMeta) error { + a.logger.Info("(WAN) Force leaving node", "node", node) + + srv, ok := a.delegate.(*consul.Server) + if !ok { + return fmt.Errorf("Must be a server to force-leave a node from the WAN cluster") + } + + err := srv.RemoveFailedNodeWAN(node, prune, entMeta) + if err != nil { + a.logger.Warn("(WAN) Failed to remove node", + "node", node, + "error", err, + ) + } + return err +} + // AgentLocalMember is used to retrieve the LAN member for the local node. func (a *Agent) AgentLocalMember() serf.Member { return a.delegate.AgentLocalMember() @@ -1585,18 +1616,6 @@ func (a *Agent) WANMembers() []serf.Member { return nil } -// IsMember is used to check if a node with the given nodeName -// is a member -func (a *Agent) IsMember(nodeName string) bool { - for _, m := range a.LANMembersInAgentPartition() { - if m.Name == nodeName { - return true - } - } - - return false -} - // StartSync is called once Services and Checks are registered. 
// This is called to prevent a race between clients and the anti-entropy routines func (a *Agent) StartSync() { @@ -1922,7 +1941,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se file := filepath.Join(configDir, fi.Name()) buf, err := ioutil.ReadFile(file) if err != nil { - return nil, fmt.Errorf("failed reading service config file %q: %s", file, err) + return nil, fmt.Errorf("failed reading service config file %q: %w", file, err) } // Try decoding the service config definition @@ -1941,10 +1960,28 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se newPath := a.makeServiceConfigFilePath(serviceID) if file != newPath { if err := os.Rename(file, newPath); err != nil { - a.logger.Error("Failed renaming service config file from %s to %s", file, newPath, err) + a.logger.Error("Failed renaming service config file", + "file", file, + "targetFile", newPath, + "error", err, + ) } } + if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.PartitionOrDefault()) { + a.logger.Info("Purging service config file in wrong partition", + "file", file, + "partition", p.PartitionOrDefault(), + ) + if err := os.Remove(file); err != nil { + a.logger.Error("Failed purging service config file", + "file", file, + "error", err, + ) + } + continue + } + out[serviceID] = p.Defaults } @@ -3045,14 +3082,18 @@ func (a *Agent) loadCheckState(check *structs.HealthCheck) error { if os.IsNotExist(err) { return nil } else { - return fmt.Errorf("failed reading file %q: %s", file, err) + return fmt.Errorf("failed reading check state %q: %w", file, err) } } if err := os.Rename(oldFile, file); err != nil { - a.logger.Error("Failed renaming service file from %s to %s", oldFile, file, err) + a.logger.Error("Failed renaming check state", + "file", oldFile, + "targetFile", file, + "error", err, + ) } } else { - return fmt.Errorf("failed reading file %q: %s", file, err) + return fmt.Errorf("failed reading file %q: %w", 
file, err) } } @@ -3242,7 +3283,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI if os.IsNotExist(err) { return nil } - return fmt.Errorf("Failed reading services dir %q: %s", svcDir, err) + return fmt.Errorf("Failed reading services dir %q: %w", svcDir, err) } for _, fi := range files { // Skip all dirs @@ -3260,7 +3301,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI file := filepath.Join(svcDir, fi.Name()) buf, err := ioutil.ReadFile(file) if err != nil { - return fmt.Errorf("failed reading service file %q: %s", file, err) + return fmt.Errorf("failed reading service file %q: %w", file, err) } // Try decoding the service definition @@ -3280,10 +3321,28 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI newPath := a.makeServiceFilePath(p.Service.CompoundServiceID()) if file != newPath { if err := os.Rename(file, newPath); err != nil { - a.logger.Error("Failed renaming service file from %s to %s", file, newPath, err) + a.logger.Error("Failed renaming service file", + "file", file, + "targetFile", newPath, + "error", err, + ) } } + if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Service.PartitionOrDefault()) { + a.logger.Info("Purging service file in wrong partition", + "file", file, + "partition", p.Service.EnterpriseMeta.PartitionOrDefault(), + ) + if err := os.Remove(file); err != nil { + a.logger.Error("Failed purging service file", + "file", file, + "error", err, + ) + } + continue + } + // Restore LocallyRegisteredAsSidecar, see persistedService.LocallyRegisteredAsSidecar p.Service.LocallyRegisteredAsSidecar = p.LocallyRegisteredAsSidecar @@ -3296,10 +3355,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI "source", p.Source, ) if err := a.purgeService(serviceID); err != nil { - return fmt.Errorf("failed purging service %q: %s", serviceID, err) + return fmt.Errorf("failed purging service %q: 
%w", serviceID, err) } if err := a.purgeServiceConfig(serviceID); err != nil { - return fmt.Errorf("failed purging service config %q: %s", serviceID, err) + return fmt.Errorf("failed purging service config %q: %w", serviceID, err) } continue } @@ -3312,10 +3371,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI "file", file, ) if err := a.purgeService(serviceID); err != nil { - return fmt.Errorf("failed purging service %q: %s", serviceID.String(), err) + return fmt.Errorf("failed purging service %q: %w", serviceID.String(), err) } if err := a.purgeServiceConfig(serviceID); err != nil { - return fmt.Errorf("failed purging service config %q: %s", serviceID.String(), err) + return fmt.Errorf("failed purging service config %q: %w", serviceID.String(), err) } } else { a.logger.Debug("restored service definition from file", @@ -3336,7 +3395,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI checkStateSnapshot: snap, }) if err != nil { - return fmt.Errorf("failed adding service %q: %s", serviceID, err) + return fmt.Errorf("failed adding service %q: %w", serviceID, err) } } } @@ -3345,7 +3404,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI if a.State.Service(serviceID) == nil { // This can be cleaned up now. 
if err := a.purgeServiceConfig(serviceID); err != nil { - return fmt.Errorf("failed purging service config %q: %s", serviceID, err) + return fmt.Errorf("failed purging service config %q: %w", serviceID, err) } } } @@ -3388,7 +3447,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] if os.IsNotExist(err) { return nil } - return fmt.Errorf("Failed reading checks dir %q: %s", checkDir, err) + return fmt.Errorf("Failed reading checks dir %q: %w", checkDir, err) } for _, fi := range files { // Ignore dirs - we only care about the check definition files @@ -3400,7 +3459,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] file := filepath.Join(checkDir, fi.Name()) buf, err := ioutil.ReadFile(file) if err != nil { - return fmt.Errorf("failed reading check file %q: %s", file, err) + return fmt.Errorf("failed reading check file %q: %w", file, err) } // Decode the check @@ -3418,10 +3477,25 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] newPath := filepath.Join(a.config.DataDir, checksDir, checkID.StringHashSHA256()) if file != newPath { if err := os.Rename(file, newPath); err != nil { - a.logger.Error("Failed renaming service file from %s to %s", file, newPath, err) + a.logger.Error("Failed renaming check file", + "file", file, + "targetFile", newPath, + "error", err, + ) } } + if !structs.EqualPartitions(a.AgentEnterpriseMeta().PartitionOrDefault(), p.Check.PartitionOrDefault()) { + a.logger.Info("Purging check file in wrong partition", + "file", file, + "partition", p.Check.PartitionOrDefault(), + ) + if err := os.Remove(file); err != nil { + return fmt.Errorf("failed purging check %q: %w", checkID, err) + } + continue + } + source, ok := ConfigSourceFromName(p.Source) if !ok { a.logger.Warn("check exists with invalid source, purging", @@ -3429,7 +3503,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] "source", p.Source, ) if err := 
a.purgeCheck(checkID); err != nil { - return fmt.Errorf("failed purging check %q: %s", checkID, err) + return fmt.Errorf("failed purging check %q: %w", checkID, err) } continue } @@ -3442,7 +3516,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] "file", file, ) if err := a.purgeCheck(checkID); err != nil { - return fmt.Errorf("Failed purging check %q: %s", checkID, err) + return fmt.Errorf("Failed purging check %q: %w", checkID, err) } } else { // Default check to critical to avoid placing potentially unhealthy @@ -3462,7 +3536,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID] "error", err, ) if err := a.purgeCheck(checkID); err != nil { - return fmt.Errorf("Failed purging check %q: %s", checkID, err) + return fmt.Errorf("Failed purging check %q: %w", checkID, err) } } a.logger.Debug("restored health check from file", diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index bb45fc541..c7ec72661 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -327,9 +327,6 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request // NOTE: we're explicitly fetching things in the requested partition and // namespace here. services := s.agent.State.Services(&entMeta) - if err := s.agent.filterServicesWithAuthorizer(authz, &services); err != nil { - return nil, err - } // Convert into api.AgentService since that includes Connect config but so far // NodeService doesn't need to internally. They are otherwise identical since @@ -337,11 +334,8 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request // anyway. 
agentSvcs := make(map[string]*api.AgentService) - dc := s.agent.config.Datacenter - - // Use empty list instead of nil - for id, s := range services { - agentService := buildAgentService(s, dc) + for id, svc := range services { + agentService := buildAgentService(svc, s.agent.config.Datacenter) agentSvcs[id.ID] = &agentService } @@ -350,7 +344,34 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request return nil, err } - return filter.Execute(agentSvcs) + raw, err := filter.Execute(agentSvcs) + if err != nil { + return nil, err + } + agentSvcs = raw.(map[string]*api.AgentService) + + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure total (and the filter-by-acls header we set below) + // do not include results that would be filtered out even if the user did have + // permission. + total := len(agentSvcs) + if err := s.agent.filterServicesWithAuthorizer(authz, agentSvcs); err != nil { + return nil, err + } + + // Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is + // authenticated (to prevent information leaking). + // + // This is done automatically for HTTP endpoints that proxy to an RPC endpoint + // that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for + // agent-local endpoints. + // + // For more information see the comment on: Server.maskResultsFilteredByACLs. 
+ if token != "" { + setResultsFilteredByACLs(resp, total != len(agentSvcs)) + } + + return agentSvcs, nil } // GET /v1/agent/service/:service_id @@ -473,13 +494,8 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request) // NOTE(partitions): this works because nodes exist in ONE partition checks := s.agent.State.Checks(&entMeta) - if err := s.agent.filterChecksWithAuthorizer(authz, &checks); err != nil { - return nil, err - } agentChecks := make(map[types.CheckID]*structs.HealthCheck) - - // Use empty list instead of nil for id, c := range checks { if c.ServiceTags == nil { clone := *c @@ -490,7 +506,34 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request) } } - return filter.Execute(agentChecks) + raw, err := filter.Execute(agentChecks) + if err != nil { + return nil, err + } + agentChecks = raw.(map[types.CheckID]*structs.HealthCheck) + + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure total (and the filter-by-acls header we set below) + // do not include results that would be filtered out even if the user did have + // permission. + total := len(agentChecks) + if err := s.agent.filterChecksWithAuthorizer(authz, agentChecks); err != nil { + return nil, err + } + + // Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is + // authenticated (to prevent information leaking). + // + // This is done automatically for HTTP endpoints that proxy to an RPC endpoint + // that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for + // agent-local endpoints. + // + // For more information see the comment on: Server.maskResultsFilteredByACLs. 
+ if token != "" { + setResultsFilteredByACLs(resp, total != len(agentChecks)) + } + + return agentChecks, nil } func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) { @@ -547,9 +590,24 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) return nil, err } } + + total := len(members) if err := s.agent.filterMembers(token, &members); err != nil { return nil, err } + + // Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is + // authenticated (to prevent information leaking). + // + // This is done automatically for HTTP endpoints that proxy to an RPC endpoint + // that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for + // agent-local endpoints. + // + // For more information see the comment on: Server.maskResultsFilteredByACLs. + if token != "" { + setResultsFilteredByACLs(resp, total != len(members)) + } + return members, nil } @@ -640,8 +698,15 @@ func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Reque // Check the value of the prune query _, prune := req.URL.Query()["prune"] + // Check if the WAN is being queried + _, wan := req.URL.Query()["wan"] + addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/force-leave/") - return nil, s.agent.ForceLeave(addr, prune, entMeta) + if wan { + return nil, s.agent.ForceLeaveWAN(addr, prune, entMeta) + } else { + return nil, s.agent.ForceLeave(addr, prune, entMeta) + } } // syncChanges is a helper function which wraps a blocking call to sync @@ -664,22 +729,16 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re } if err := decodeBody(req.Body, &args); err != nil { - resp.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(resp, "Request decode failed: %v", err) - return nil, nil + return nil, BadRequestError{fmt.Sprintf("Request decode failed: %v", err)} } // Verify the check has a name. 
if args.Name == "" { - resp.WriteHeader(http.StatusBadRequest) - fmt.Fprint(resp, "Missing check name") - return nil, nil + return nil, BadRequestError{"Missing check name"} } if args.Status != "" && !structs.ValidStatus(args.Status) { - resp.WriteHeader(http.StatusBadRequest) - fmt.Fprint(resp, "Bad check status") - return nil, nil + return nil, BadRequestError{"Bad check status"} } authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &args.EnterpriseMeta, nil) @@ -698,19 +757,20 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re chkType := args.CheckType() err = chkType.Validate() if err != nil { - resp.WriteHeader(http.StatusBadRequest) - fmt.Fprint(resp, fmt.Errorf("Invalid check: %v", err)) - return nil, nil + return nil, BadRequestError{fmt.Sprintf("Invalid check: %v", err)} } // Store the type of check based on the definition health.Type = chkType.Type() if health.ServiceID != "" { + cid := health.CompoundServiceID() // fixup the service name so that vetCheckRegister requires the right ACLs - service := s.agent.State.Service(health.CompoundServiceID()) + service := s.agent.State.Service(cid) if service != nil { health.ServiceName = service.Service + } else { + return nil, NotFoundError{fmt.Sprintf("ServiceID %q does not exist", cid.String())} } } @@ -746,14 +806,14 @@ func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http. 
checkID.Normalize() - if err := s.agent.vetCheckUpdateWithAuthorizer(authz, checkID); err != nil { - return nil, err - } - if !s.validateRequestPartition(resp, &checkID.EnterpriseMeta) { return nil, nil } + if err := s.agent.vetCheckUpdateWithAuthorizer(authz, checkID); err != nil { + return nil, err + } + if err := s.agent.RemoveCheck(checkID, true); err != nil { return nil, err } @@ -945,7 +1005,7 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt } notFoundReason := fmt.Sprintf("ServiceId %s not found", sid.String()) if returnTextPlain(req) { - return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "application/json"} + return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "text/plain"} } return &api.AgentServiceChecksInfo{ AggregatedStatus: api.HealthCritical, @@ -1205,14 +1265,14 @@ func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *htt sid.Normalize() - if err := s.agent.vetServiceUpdateWithAuthorizer(authz, sid); err != nil { - return nil, err - } - if !s.validateRequestPartition(resp, &sid.EnterpriseMeta) { return nil, nil } + if err := s.agent.vetServiceUpdateWithAuthorizer(authz, sid); err != nil { + return nil, err + } + if err := s.agent.RemoveService(sid); err != nil { return nil, err } @@ -1449,8 +1509,8 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) ( triggerAntiEntropySync = true } - case "acl_agent_master_token", "agent_master": - s.agent.tokens.UpdateAgentMasterToken(args.Token, token_store.TokenSourceAPI) + case "acl_agent_master_token", "agent_master", "agent_recovery": + s.agent.tokens.UpdateAgentRecoveryToken(args.Token, token_store.TokenSourceAPI) case "acl_replication_token", "replication": s.agent.tokens.UpdateReplicationToken(args.Token, token_store.TokenSourceAPI) @@ -1499,7 +1559,9 @@ func (s *HTTPHandlers) 
AgentConnectCARoots(resp http.ResponseWriter, req *http.R } // AgentConnectCALeafCert returns the certificate bundle for a service -// instance. This supports blocking queries to update the returned bundle. +// instance. This endpoint ignores all "Cache-Control" attributes. +// This supports blocking queries to update the returned bundle. +// Non-blocking queries will always verify that the cache entry is still valid. func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Get the service name. Note that this is the name of the service, // not the ID of the service instance. @@ -1523,6 +1585,14 @@ func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *htt args.MaxQueryTime = qOpts.MaxQueryTime args.Token = qOpts.Token + // TODO(ffmmmm): maybe set MustRevalidate in ConnectCALeafRequest (as part of CacheInfo()) + // We don't want non-blocking queries to return expired leaf certs + // or leaf certs not valid under the current CA. 
So always revalidate + // the leaf cert on non-blocking queries (ie when MinQueryIndex == 0) + if args.MinQueryIndex == 0 { + args.MustRevalidate = true + } + if !s.validateRequestPartition(resp, &args.EnterpriseMeta) { return nil, nil } diff --git a/agent/agent_endpoint_oss.go b/agent/agent_endpoint_oss.go index 1c4dd4428..2c6585a4b 100644 --- a/agent/agent_endpoint_oss.go +++ b/agent/agent_endpoint_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 7e8f3d7ff..3d4a80beb 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -53,8 +53,8 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq)) resp := httptest.NewRecorder() - _, err := srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) tokenReq := &structs.ACLToken{ Description: "agent-read-token-for-test", @@ -63,10 +63,12 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err := srv.ACLTokenCreate(resp, req) + srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + svcToken := &structs.ACLToken{} + dec := json.NewDecoder(resp.Body) + err := dec.Decode(svcToken) require.NoError(t, err) - svcToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) return svcToken.SecretID } @@ -283,13 +285,21 @@ func TestAgent_Services_MeshGateway(t *testing.T) { a.State.AddService(srv1, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := 
dec.Decode(&val) require.NoError(t, err) - val := obj.(map[string]*api.AgentService) + require.Len(t, val, 1) actual := val["mg-dc1-01"] require.NotNil(t, actual) require.Equal(t, api.ServiceKindMeshGateway, actual.Kind) + // Proxy.ToAPI() creates an empty Upstream list instead of keeping nil so do the same with actual. + if actual.Proxy.Upstreams == nil { + actual.Proxy.Upstreams = make([]api.Upstream, 0) + } require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy) } @@ -319,13 +329,21 @@ func TestAgent_Services_TerminatingGateway(t *testing.T) { require.NoError(t, a.State.AddService(srv1, "")) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) require.NoError(t, err) - val := obj.(map[string]*api.AgentService) + require.Len(t, val, 1) actual := val["tg-dc1-01"] require.NotNil(t, actual) require.Equal(t, api.ServiceKindTerminatingGateway, actual.Kind) + // Proxy.ToAPI() creates an empty Upstream list instead of keeping nil so do the same with actual. 
+ if actual.Proxy.Upstreams == nil { + actual.Proxy.Upstreams = make([]api.Upstream, 0) + } require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy) } @@ -339,36 +357,75 @@ func TestAgent_Services_ACLFilter(t *testing.T) { defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") - srv1 := &structs.NodeService{ - ID: "mysql", - Service: "mysql", - Tags: []string{"master"}, - Port: 5000, + + services := []*structs.NodeService{ + { + ID: "web", + Service: "web", + Port: 5000, + }, + { + ID: "api", + Service: "api", + Port: 6000, + }, + } + for _, s := range services { + a.State.AddService(s, "") } - a.State.AddService(srv1, "") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[string]*api.AgentService) + if len(val) != 0 { - t.Fatalf("bad: %v", obj) + t.Fatalf("bad: %v", val) } + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("limited token", func(t *testing.T) { + require := require.New(t) + + token := testCreateToken(t, a, ` + service "web" { + policy = "read" + } + `) + + req := httptest.NewRequest("GET", fmt.Sprintf("/v1/agent/services?token=%s", token), nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(val, 1) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/services?token=root", nil) - obj, err := a.srv.AgentServices(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec 
:= json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[string]*api.AgentService) - if len(val) != 1 { - t.Fatalf("bad: %v", obj) - } + require.Len(t, val, 2) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) } @@ -519,8 +576,7 @@ func TestAgent_Service(t *testing.T) { // don't alter it and affect later test cases. req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(updatedProxy)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) }, wantWait: 100 * time.Millisecond, @@ -553,8 +609,7 @@ func TestAgent_Service(t *testing.T) { // Re-register with _same_ proxy config req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) }, wantWait: 200 * time.Millisecond, @@ -646,8 +701,7 @@ func TestAgent_Service(t *testing.T) { { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } @@ -665,14 +719,11 @@ func TestAgent_Service(t *testing.T) { go tt.updateFunc() } start := time.Now() - obj, err := a.srv.AgentService(resp, req) + a.srv.h.ServeHTTP(resp, req) elapsed := time.Since(start) if tt.wantErr != "" { - require.Error(err) - require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr)) - } else { - require.NoError(err) + 
require.Contains(strings.ToLower(resp.Body.String()), strings.ToLower(tt.wantErr)) } if tt.wantCode != 0 { require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String()) @@ -686,12 +737,13 @@ func TestAgent_Service(t *testing.T) { } if tt.wantResp != nil { - assert.Equal(tt.wantResp, obj) + dec := json.NewDecoder(resp.Body) + val := &api.AgentService{} + err := dec.Decode(&val) + require.NoError(err) + + assert.Equal(tt.wantResp, val) assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash")) - } else { - // Janky but Equal doesn't help here because nil != - // *api.AgentService((*api.AgentService)(nil)) - assert.Nil(obj) } }) } @@ -718,25 +770,29 @@ func TestAgent_Checks(t *testing.T) { a.State.AddCheck(chk1, "") req, _ := http.NewRequest("GET", "/v1/agent/checks", nil) - obj, err := a.srv.AgentChecks(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[types.CheckID]*structs.HealthCheck + err := dec.Decode(&val) if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[types.CheckID]*structs.HealthCheck) + if len(val) != 1 { - t.Fatalf("bad checks: %v", obj) + t.Fatalf("bad checks: %v", val) } if val["mysql"].Status != api.HealthPassing { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } if val["mysql"].Node != chk1.Node { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } if val["mysql"].Interval != chk1.Interval { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } if val["mysql"].Timeout != chk1.Timeout { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } } @@ -767,9 +823,13 @@ func TestAgent_ChecksWithFilter(t *testing.T) { a.State.AddCheck(chk2, "") req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape("Name == `redis`"), nil) - obj, err := a.srv.AgentChecks(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + 
var val map[types.CheckID]*structs.HealthCheck + err := dec.Decode(&val) require.NoError(t, err) - val := obj.(map[types.CheckID]*structs.HealthCheck) + require.Len(t, val, 1) _, ok := val["redis"] require.True(t, ok) @@ -789,21 +849,29 @@ func TestAgent_HealthServiceByID(t *testing.T) { ID: "mysql", Service: "mysql", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + + serviceReq := AddServiceRequest{ + Service: service, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, + } + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql2", Service: "mysql2", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql3", Service: "mysql3", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } @@ -885,41 +953,28 @@ func TestAgent_HealthServiceByID(t *testing.T) { t.Helper() req, _ := http.NewRequest("GET", url+"?format=text", nil) resp := httptest.NewRecorder() - data, err := a.srv.AgentHealthServiceByID(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) - } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { - t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload) - } - body, ok := data.(string) - if !ok { - t.Fatalf("Cannot get result as string in := %#v", data) + a.srv.h.ServeHTTP(resp, req) + body := resp.Body.String() + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: expected %d, but had: %d", 
expectedCode, resp.Code) } if got, want := body, expected; got != want { t.Fatalf("got body %q want %q", got, want) } - if got, want := codeWithPayload.Reason, expected; got != want { - t.Fatalf("got body %q want %q", got, want) - } }) t.Run("format=json", func(t *testing.T) { req, _ := http.NewRequest("GET", url, nil) resp := httptest.NewRecorder() - dataRaw, err := a.srv.AgentHealthServiceByID(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) + a.srv.h.ServeHTTP(resp, req) + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: expected %d, but had: %d", expectedCode, resp.Code) } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { - t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload) + dec := json.NewDecoder(resp.Body) + data := &api.AgentServiceChecksInfo{} + if err := dec.Decode(data); err != nil { + t.Fatalf("Cannot convert result from JSON: %v", err) } - data, ok := dataRaw.(*api.AgentServiceChecksInfo) - if !ok { - t.Fatalf("Cannot connvert result to JSON: %#v", dataRaw) - } - if codeWithPayload.StatusCode != http.StatusNotFound { + if resp.Code != http.StatusNotFound { if data != nil && data.AggregatedStatus != expected { t.Fatalf("got body %v want %v", data, expected) } @@ -987,42 +1042,49 @@ func TestAgent_HealthServiceByName(t *testing.T) { ID: "mysql1", Service: "mysql-pool-r", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + serviceReq := AddServiceRequest{ + Service: service, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, + } + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql2", Service: "mysql-pool-r", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); 
err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql3", Service: "mysql-pool-rw", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql4", Service: "mysql-pool-rw", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "httpd1", Service: "httpd", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "httpd2", Service: "httpd", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } @@ -1136,18 +1198,11 @@ func TestAgent_HealthServiceByName(t *testing.T) { t.Helper() req, _ := http.NewRequest("GET", url+"?format=text", nil) resp := httptest.NewRecorder() - data, err := a.srv.AgentHealthServiceByName(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) - } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { + a.srv.h.ServeHTTP(resp, req) + if got, want := resp.Code, expectedCode; got != want { t.Fatalf("returned bad status: %d. 
Body: %q", resp.Code, resp.Body.String()) } - if got, want := codeWithPayload.Reason, expected; got != want { - t.Fatalf("got reason %q want %q", got, want) - } - if got, want := data, expected; got != want { + if got, want := resp.Body.String(), expected; got != want { t.Fatalf("got body %q want %q", got, want) } }) @@ -1155,21 +1210,26 @@ func TestAgent_HealthServiceByName(t *testing.T) { t.Helper() req, _ := http.NewRequest("GET", url, nil) resp := httptest.NewRecorder() - dataRaw, err := a.srv.AgentHealthServiceByName(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + data := make([]*api.AgentServiceChecksInfo, 0) + if err := dec.Decode(&data); err != nil { + t.Fatalf("Cannot convert result from JSON: %v", err) } - data, ok := dataRaw.([]api.AgentServiceChecksInfo) - if !ok { - t.Fatalf("Cannot connvert result to JSON") - } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { + if got, want := resp.Code, expectedCode; got != want { t.Fatalf("returned bad code: %d. 
Body: %#v", resp.Code, data) } if resp.Code != http.StatusNotFound { - if codeWithPayload.Reason != expected { - t.Fatalf("got wrong status %#v want %#v", codeWithPayload, expected) + matched := false + for _, d := range data { + if d.AggregatedStatus == expected { + matched = true + break + } + } + + if !matched { + t.Fatalf("got wrong status, wanted %#v", expected) } } }) @@ -1234,47 +1294,54 @@ func TestAgent_HealthServicesACLEnforcement(t *testing.T) { ID: "mysql1", Service: "mysql", } - require.NoError(t, a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal)) + serviceReq := AddServiceRequest{ + Service: service, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, + } + require.NoError(t, a.AddService(serviceReq)) - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "foo1", Service: "foo", } - require.NoError(t, a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal)) + require.NoError(t, a.AddService(serviceReq)) // no token t.Run("no-token-health-by-id", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/id/mysql1", nil) require.NoError(t, err) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByID(resp, req) - require.Equal(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("no-token-health-by-name", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/name/mysql", nil) require.NoError(t, err) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByName(resp, req) - require.Equal(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root-token-health-by-id", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/id/foo1", nil) require.NoError(t, err) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + 
req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByID(resp, req) - require.NotEqual(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("root-token-health-by-name", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/name/foo", nil) require.NoError(t, err) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByName(resp, req) - require.NotEqual(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -1288,36 +1355,77 @@ func TestAgent_Checks_ACLFilter(t *testing.T) { defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") - chk1 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "mysql", - Name: "mysql", - Status: api.HealthPassing, + + checks := structs.HealthChecks{ + { + Node: a.Config.NodeName, + CheckID: "web", + ServiceName: "web", + Status: api.HealthPassing, + }, + { + Node: a.Config.NodeName, + CheckID: "api", + ServiceName: "api", + Status: api.HealthPassing, + }, + } + for _, c := range checks { + a.State.AddCheck(c, "") } - a.State.AddCheck(chk1, "") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/checks", nil) - obj, err := a.srv.AgentChecks(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[types.CheckID]*structs.HealthCheck) - if len(val) != 0 { - t.Fatalf("bad checks: %v", obj) + + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("limited 
token", func(t *testing.T) { + require := require.New(t) + + token := testCreateToken(t, a, fmt.Sprintf(` + service "web" { + policy = "read" + } + node "%s" { + policy = "read" + } + `, a.Config.NodeName)) + + req := httptest.NewRequest("GET", fmt.Sprintf("/v1/agent/checks?token=%s", token), nil) + resp := httptest.NewRecorder() + + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[types.CheckID]*structs.HealthCheck + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) } + require.Len(val, 1) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/checks?token=root", nil) - obj, err := a.srv.AgentChecks(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[types.CheckID]*structs.HealthCheck) - if len(val) != 1 { - t.Fatalf("bad checks: %v", obj) - } + require.Len(t, val, 2) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) } @@ -1361,12 +1469,15 @@ func TestAgent_Self(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/self", nil) - obj, err := a.srv.AgentSelf(nil, req) - require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := &Self{} + require.NoError(t, dec.Decode(val)) - val := obj.(Self) require.Equal(t, a.Config.SerfPortLAN, int(val.Member.Port)) - require.Equal(t, a.Config.SerfPortLAN, val.DebugConfig["SerfPortLAN"].(int)) + require.Equal(t, a.Config.SerfPortLAN, int(val.DebugConfig["SerfPortLAN"].(float64))) cs, err := a.GetLANCoordinate() require.NoError(t, err) @@ -1401,24 +1512,24 @@ func TestAgent_Self_ACLDeny(t *testing.T) { 
testrpc.WaitForLeader(t, a.RPC, "dc1") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/self", nil) - if _, err := a.srv.AgentSelf(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/self?token=towel", nil) - if _, err := a.srv.AgentSelf(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/self?token=%s", ro), nil) - if _, err := a.srv.AgentSelf(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -1434,24 +1545,24 @@ func TestAgent_Metrics_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/metrics", nil) - if _, err := a.srv.AgentMetrics(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/metrics?token=towel", nil) - if _, err := a.srv.AgentMetrics(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/metrics?token=%s", ro), nil) - if _, 
err := a.srv.AgentMetrics(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -1783,17 +1894,17 @@ func TestAgent_Reload_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/reload", nil) - if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/reload?token=%s", ro), nil) - if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) // This proves we call the ACL function, and we've got the other reload @@ -1813,17 +1924,21 @@ func TestAgent_Members(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/members", nil) - obj, err := a.srv.AgentMembers(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.([]serf.Member) + if len(val) == 0 { - t.Fatalf("bad members: %v", obj) + t.Fatalf("bad members: %v", val) } if int(val[0].Port) != a.Config.SerfPortLAN { - t.Fatalf("not lan: %v", obj) + t.Fatalf("not lan: %v", val) } } @@ -1838,17 +1953,21 @@ func TestAgent_Members_WAN(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/members?wan=true", nil) - obj, err := a.srv.AgentMembers(nil, req) - if 
err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.([]serf.Member) + if len(val) == 0 { - t.Fatalf("bad members: %v", obj) + t.Fatalf("bad members: %v", val) } if int(val[0].Port) != a.Config.SerfPortWAN { - t.Fatalf("not wan: %v", obj) + t.Fatalf("not wan: %v", val) } } @@ -1858,32 +1977,70 @@ func TestAgent_Members_ACLFilter(t *testing.T) { } t.Parallel() + + // Start 2 agents and join them together. a := NewTestAgent(t, TestACLConfig()) defer a.Shutdown() + b := NewTestAgent(t, TestACLConfig()) + defer b.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + testrpc.WaitForLeader(t, b.RPC, "dc1") + + joinPath := fmt.Sprintf("/v1/agent/join/127.0.0.1:%d?token=root", b.Config.SerfPortLAN) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, httptest.NewRequest(http.MethodPut, joinPath, nil)) + require.Equal(t, http.StatusOK, resp.Code) + t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/members", nil) - obj, err := a.srv.AgentMembers(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.([]serf.Member) - if len(val) != 0 { - t.Fatalf("bad members: %v", obj) + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("limited token", func(t *testing.T) { + require := require.New(t) + + token := testCreateToken(t, a, fmt.Sprintf(` + node "%s" { + policy = "read" + } + `, b.Config.NodeName)) + + req := httptest.NewRequest("GET", fmt.Sprintf("/v1/agent/members?token=%s", token), nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 
0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) } + require.Len(val, 1) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/members?token=root", nil) - obj, err := a.srv.AgentMembers(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.([]serf.Member) - if len(val) != 1 { - t.Fatalf("bad members: %v", obj) - } + require.Len(t, val, 2) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) } @@ -1902,13 +2059,8 @@ func TestAgent_Join(t *testing.T) { addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) - obj, err := a1.srv.AgentJoin(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) if len(a1.LANMembersInAgentPartition()) != 2 { t.Fatalf("should have 2 members") @@ -1936,13 +2088,8 @@ func TestAgent_Join_WAN(t *testing.T) { addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortWAN) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?wan=true", addr), nil) - obj, err := a1.srv.AgentJoin(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) if len(a1.WANMembers()) != 2 { t.Fatalf("should have 2 members") @@ -1972,25 +2119,27 @@ func TestAgent_Join_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) - if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + 
resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=towel", addr), nil) - _, err := a1.srv.AgentJoin(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a1.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=%s", addr, ro), nil) - if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) } @@ -2056,13 +2205,10 @@ func TestAgent_Leave(t *testing.T) { // Graceful leave now req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil) - obj, err := a2.srv.AgentLeave(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a2.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + retry.Run(t, func(r *retry.R) { m := a1.LANMembersInAgentPartition() if got, want := m[1].Status, serf.StatusLeft; got != want { @@ -2083,26 +2229,29 @@ func TestAgent_Leave_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil) - if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/leave?token=%s", ro), nil) - if _, err := 
a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) // this sub-test will change the state so that there is no leader. // it must therefore be the last one in this list. t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/leave?token=towel", nil) - if _, err := a.srv.AgentLeave(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2137,13 +2286,10 @@ func TestAgent_ForceLeave(t *testing.T) { // Force leave now req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s", a2.Config.NodeName), nil) - obj, err := a1.srv.AgentForceLeave(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + retry.Run(t, func(r *retry.R) { m := a1.LANMembersInAgentPartition() if got, want := m[1].Status, serf.StatusLeft; got != want { @@ -2181,24 +2327,24 @@ func TestAgent_ForceLeave_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", uri, nil) - if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", uri+"?token=towel", nil) - if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro 
:= createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", ro), nil) - if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("operator write token", func(t *testing.T) { @@ -2209,9 +2355,9 @@ func TestAgent_ForceLeave_ACLDeny(t *testing.T) { opToken := testCreateToken(t, a, rules) req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", opToken), nil) - if _, err := a.srv.AgentForceLeave(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2250,13 +2396,9 @@ func TestAgent_ForceLeavePrune(t *testing.T) { // Force leave now req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s?prune=true", a2.Config.NodeName), nil) - obj, err := a1.srv.AgentForceLeave(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) retry.Run(t, func(r *retry.R) { m := len(a1.LANMembersInAgentPartition()) if m != 1 { @@ -2265,6 +2407,74 @@ func TestAgent_ForceLeavePrune(t *testing.T) { }) } +func TestAgent_ForceLeavePrune_WAN(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a1 := StartTestAgent(t, TestAgent{Name: "dc1", HCL: ` + datacenter = "dc1" + primary_datacenter = "dc1" + gossip_wan { + probe_interval = "50ms" + suspicion_mult = 2 + } + `}) + defer a1.Shutdown() + + a2 := StartTestAgent(t, TestAgent{Name: "dc2", HCL: ` + datacenter = "dc2" + primary_datacenter = "dc1" + `}) + defer a2.Shutdown() + + testrpc.WaitForLeader(t, a1.RPC, "dc1") + testrpc.WaitForLeader(t, a2.RPC, "dc2") + + // Wait for the WAN 
join. + addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN) + _, err := a2.JoinWAN([]string{addr}) + require.NoError(t, err) + + testrpc.WaitForLeader(t, a1.RPC, "dc2") + testrpc.WaitForLeader(t, a2.RPC, "dc1") + + retry.Run(t, func(r *retry.R) { + require.Len(r, a1.WANMembers(), 2) + require.Len(r, a2.WANMembers(), 2) + }) + + wanNodeName_a2 := a2.Config.NodeName + ".dc2" + + // Shutdown and wait for agent being marked as failed, so we wait for full + // shutdown of Agent. + require.NoError(t, a2.Shutdown()) + retry.Run(t, func(r *retry.R) { + m := a1.WANMembers() + for _, member := range m { + if member.Name == wanNodeName_a2 { + if member.Status != serf.StatusFailed { + r.Fatalf("got status %q want %q", member.Status, serf.StatusFailed) + } + } + } + }) + + // Force leave now + req, err := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s?prune=1&wan=1", wanNodeName_a2), nil) + require.NoError(t, err) + + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, resp.Body.String()) + + retry.Run(t, func(r *retry.R) { + require.Len(r, a1.WANMembers(), 1) + }) +} + func TestAgent_RegisterCheck(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -2280,13 +2490,9 @@ func TestAgent_RegisterCheck(t *testing.T) { TTL: 15 * time.Second, } req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args)) - obj, err := a.srv.AgentRegisterCheck(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // Ensure we have a check mapping checkID := structs.NewCheckID("test", nil) @@ -2357,9 +2563,7 @@ func TestAgent_RegisterCheck_Scripts(t *testing.T) { t.Run(tt.name+" as node check", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(tt.check)) resp := 
httptest.NewRecorder() - if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil { - t.Fatalf("err: %v", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("bad: %d", resp.Code) } @@ -2374,9 +2578,7 @@ func TestAgent_RegisterCheck_Scripts(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - if _, err := a.srv.AgentRegisterService(resp, req); err != nil { - t.Fatalf("err: %v", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("bad: %d", resp.Code) } @@ -2391,9 +2593,7 @@ func TestAgent_RegisterCheck_Scripts(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - if _, err := a.srv.AgentRegisterService(resp, req); err != nil { - t.Fatalf("err: %v", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("bad: %d", resp.Code) } @@ -2418,12 +2618,12 @@ func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args)) res := httptest.NewRecorder() - _, err := a.srv.AgentRegisterCheck(res, req) - if err == nil { - t.Fatalf("expected error but got nil") + a.srv.h.ServeHTTP(res, req) + if http.StatusInternalServerError != res.Code { + t.Fatalf("expected 500 code error but got %v", res.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(res.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", res.Body.String()) } checkID := structs.NewCheckID("test", nil) require.Nil(t, a.State.Check(checkID), "check registered with exec disabled") @@ -2448,12 +2648,12 @@ func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) { } req, _ := http.NewRequest("PUT", 
"/v1/agent/check/register?token=abc123", jsonReader(args)) res := httptest.NewRecorder() - _, err := a.srv.AgentRegisterCheck(res, req) - if err == nil { - t.Fatalf("expected error but got nil") + a.srv.h.ServeHTTP(res, req) + if http.StatusInternalServerError != res.Code { + t.Fatalf("expected 500 code error but got %v", res.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(res.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", res.Body.String()) } checkID := structs.NewCheckID("test", nil) require.Nil(t, a.State.Check(checkID), "check registered with exec disabled") @@ -2475,12 +2675,10 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) { Status: api.HealthPassing, } req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args)) - obj, err := a.srv.AgentRegisterCheck(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expcted 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2516,10 +2714,8 @@ func TestAgent_RegisterCheck_BadStatus(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args)) resp := httptest.NewRecorder() - if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil { - t.Fatalf("err: %v", err) - } - if resp.Code != 400 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusBadRequest { t.Fatalf("accepted bad status") } } @@ -2554,8 +2750,8 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { // ensure the service is ready for registering a check for it. 
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(svc)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // create a policy that has write on service foo policyReq := &structs.ACLPolicy{ @@ -2565,8 +2761,8 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq)) resp = httptest.NewRecorder() - _, err = a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // create a policy that has write on the node name of the agent policyReq = &structs.ACLPolicy{ @@ -2576,8 +2772,8 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq)) resp = httptest.NewRecorder() - _, err = a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // create a token using the write-foo policy tokenReq := &structs.ACLToken{ @@ -2591,10 +2787,14 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - svcToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec := json.NewDecoder(resp.Body) + svcToken := &structs.ACLToken{} + if err := dec.Decode(svcToken); err != nil { + t.Fatalf("err: %v", err) + } require.NotNil(t, svcToken) // create a token using the write-node policy @@ -2609,57 +2809,67 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = 
httptest.NewRecorder() - tokInf, err = a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - nodeToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec = json.NewDecoder(resp.Body) + nodeToken := &structs.ACLToken{} + if err := dec.Decode(nodeToken); err != nil { + t.Fatalf("err: %v", err) + } require.NotNil(t, nodeToken) t.Run("no token - node check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(nodeCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(r, http.StatusForbidden, resp.Code) }) }) t.Run("svc token - node check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(nodeCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(r, http.StatusForbidden, resp.Code) }) }) t.Run("node token - node check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(nodeCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.NoError(r, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(r, http.StatusOK, resp.Code) }) }) t.Run("no token - svc check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(svcCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(r, http.StatusForbidden, resp.Code) }) }) 
t.Run("node token - svc check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(svcCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(r, http.StatusForbidden, resp.Code) }) }) t.Run("svc token - svc check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(svcCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.NoError(r, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(r, http.StatusOK, resp.Code) }) }) } @@ -2680,12 +2890,10 @@ func TestAgent_DeregisterCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil) - obj, err := a.srv.AgentDeregisterCheck(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2709,16 +2917,16 @@ func TestAgent_DeregisterCheckACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil) - if _, err := a.srv.AgentDeregisterCheck(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test?token=root", nil) - if _, err := a.srv.AgentDeregisterCheck(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + 
require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2739,12 +2947,11 @@ func TestAgent_PassCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil) - obj, err := a.srv.AgentCheckPass(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2772,16 +2979,16 @@ func TestAgent_PassCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil) - if _, err := a.srv.AgentCheckPass(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test?token=root", nil) - if _, err := a.srv.AgentCheckPass(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2802,12 +3009,11 @@ func TestAgent_WarnCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil) - obj, err := a.srv.AgentCheckWarn(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2835,16 +3041,16 @@ func TestAgent_WarnCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil) - if _, err := a.srv.AgentCheckWarn(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := 
httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test?token=root", nil) - if _, err := a.srv.AgentCheckWarn(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2865,12 +3071,11 @@ func TestAgent_FailCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil) - obj, err := a.srv.AgentCheckFail(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2898,16 +3103,16 @@ func TestAgent_FailCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil) - if _, err := a.srv.AgentCheckFail(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test?token=root", nil) - if _, err := a.srv.AgentCheckFail(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2938,14 +3143,8 @@ func TestAgent_UpdateCheck(t *testing.T) { t.Run(c.Status, func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(c)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentCheckUpdate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } - if resp.Code 
!= 200 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { t.Fatalf("expected 200, got %d", resp.Code) } @@ -2963,14 +3162,8 @@ func TestAgent_UpdateCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentCheckUpdate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } - if resp.Code != 200 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { t.Fatalf("expected 200, got %d", resp.Code) } @@ -2987,14 +3180,8 @@ func TestAgent_UpdateCheck(t *testing.T) { args := checkUpdate{Status: "itscomplicated"} req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentCheckUpdate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } - if resp.Code != 400 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusBadRequest { t.Fatalf("expected 400, got %d", resp.Code) } }) @@ -3019,17 +3206,17 @@ func TestAgent_UpdateCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { args := checkUpdate{api.HealthPassing, "hello-passing"} req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args)) - if _, err := a.srv.AgentCheckUpdate(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { args := checkUpdate{api.HealthPassing, "hello-passing"} req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test?token=root", jsonReader(args)) - if _, err := a.srv.AgentCheckUpdate(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -3077,13 
+3264,10 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) { }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) - - obj, err := a.srv.AgentRegisterService(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure the service @@ -3166,8 +3350,9 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) { }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - _, err := a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) args = &structs.ServiceDefinition{ Name: "test", @@ -3190,8 +3375,9 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) { }, } req, _ = http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - _, err = a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp = httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) checks := a.State.Checks(structs.DefaultEnterpriseMetaInDefaultPartition()) require.Equal(t, 3, len(checks)) @@ -3246,8 +3432,9 @@ func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, ex }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args)) - _, err := a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) args = &structs.ServiceDefinition{ Name: "test", @@ -3269,8 +3456,9 @@ func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, ex }, } req, _ = http.NewRequest("PUT", 
"/v1/agent/service/register?replace-existing-checks", jsonReader(args)) - _, err = a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp = httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) checks := a.State.Checks(structs.DefaultEnterpriseMetaInDefaultPartition()) require.Len(t, checks, 2) @@ -3398,9 +3586,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", strings.NewReader(json)) rr := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(rr, req) - require.NoError(t, err) - require.Nil(t, obj) + a.srv.h.ServeHTTP(rr, req) require.Equal(t, 200, rr.Code, "body: %s", rr.Body) svc := &structs.NodeService{ @@ -3550,16 +3736,16 @@ func testAgent_RegisterService_ACLDeny(t *testing.T, extraHCL string) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -3594,10 +3780,7 @@ func testAgent_RegisterService_InvalidAddress(t *testing.T, extraHCL string) { } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - if err != nil { - t.Fatalf("got error %v want nil", err) - } + a.srv.h.ServeHTTP(resp, req) if got, want := resp.Code, 400; got != want { 
t.Fatalf("got code %d want %d", got, want) } @@ -3662,9 +3845,8 @@ func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL stri req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) - require.Nil(t, obj) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // Ensure the service sid := structs.NewServiceID("connect-proxy", nil) @@ -3741,10 +3923,14 @@ func testCreateToken(t *testing.T, a *TestAgent, rules string) string { } req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - require.NotNil(t, obj) - aclResp := obj.(*structs.ACLToken) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec := json.NewDecoder(resp.Body) + aclResp := &structs.ACLToken{} + if err := dec.Decode(aclResp); err != nil { + t.Fatalf("err: %v", err) + } return aclResp.SecretID } @@ -3755,10 +3941,14 @@ func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string { } req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) - require.NotNil(t, obj) - aclResp := obj.(*structs.ACLPolicy) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec := json.NewDecoder(resp.Body) + aclResp := &structs.ACLPolicy{} + if err := dec.Decode(aclResp); err != nil { + t.Fatalf("err: %v", err) + } return aclResp.ID } @@ -4190,15 +4380,11 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token="+token, br) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) + 
a.srv.h.ServeHTTP(resp, req) if tt.wantErr != "" { - require.Error(err, "response code=%d, body:\n%s", - resp.Code, resp.Body.String()) - require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr)) + require.Contains(strings.ToLower(resp.Body.String()), strings.ToLower(tt.wantErr)) return } - require.NoError(err) - assert.Nil(obj) require.Equal(200, resp.Code, "request failed with body: %s", resp.Body.String()) @@ -4207,7 +4393,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s // Parse the expected definition into a ServiceDefinition var sd structs.ServiceDefinition - err = json.Unmarshal([]byte(tt.json), &sd) + err := json.Unmarshal([]byte(tt.json), &sd) require.NoError(err) require.NotEmpty(sd.Name) @@ -4248,9 +4434,8 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s req := httptest.NewRequest("PUT", "/v1/agent/service/deregister/"+svcID+"?token="+token, nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentDeregisterService(resp, req) - require.NoError(err) - require.Nil(obj) + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusOK, resp.Code) svcs := a.State.AllServices() _, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)] @@ -4303,9 +4488,7 @@ func testAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T, extraH req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) - assert.Nil(err) - assert.Nil(obj) + a.srv.h.ServeHTTP(resp, req) assert.Equal(http.StatusBadRequest, resp.Code) assert.Contains(resp.Body.String(), "Port") @@ -4353,9 +4536,8 @@ func testAgent_RegisterService_ConnectNative(t *testing.T, extraHCL string) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) - assert.Nil(err) - assert.Nil(obj) + 
a.srv.h.ServeHTTP(resp, req) + assert.Equal(http.StatusOK, resp.Code) // Ensure the service svc := a.State.Service(structs.NewServiceID("web", nil)) @@ -4401,13 +4583,13 @@ func testAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T, extraHCL st }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) - - _, err := a.srv.AgentRegisterService(nil, req) - if err == nil { - t.Fatalf("expected error but got nil") + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusInternalServerError != resp.Code { + t.Fatalf("expected 500 but got %v", resp.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(resp.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", resp.Body.String()) } checkID := types.CheckID("test-check") require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled") @@ -4453,13 +4635,13 @@ func testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T, extra }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) - - _, err := a.srv.AgentRegisterService(nil, req) - if err == nil { - t.Fatalf("expected error but got nil") + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusInternalServerError != resp.Code { + t.Fatalf("expected 500 but got %v", resp.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(resp.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", resp.Body.String()) } checkID := types.CheckID("test-check") require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled") @@ -4475,21 
+4657,23 @@ func TestAgent_DeregisterService(t *testing.T) { defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil) - obj, err := a.srv.AgentDeregisterService(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -4507,26 +4691,30 @@ func TestAgent_DeregisterService_ACLDeny(t *testing.T) { defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil) - if _, err := a.srv.AgentDeregisterService(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test?token=root", nil) - if 
_, err := a.srv.AgentDeregisterService(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -4543,9 +4731,7 @@ func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) { t.Run("not enabled", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 400 { t.Fatalf("expected 400, got %d", resp.Code) } @@ -4554,9 +4740,7 @@ func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) { t.Run("no service id", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/?enable=true", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 400 { t.Fatalf("expected 400, got %d", resp.Code) } @@ -4582,20 +4766,22 @@ func TestAgent_ServiceMaintenance_Enable(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") // Register the service - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) // Force the service into maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=mytoken", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { 
t.Fatalf("expected 200, got %d", resp.Code) } @@ -4629,13 +4815,17 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") // Register the service - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) // Force the service into maintenance mode if err := a.EnableServiceMaintenance(structs.NewServiceID("test", nil), "", ""); err != nil { @@ -4645,9 +4835,7 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) { // Leave maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=false", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { t.Fatalf("expected 200, got %d", resp.Code) } @@ -4670,26 +4858,30 @@ func TestAgent_ServiceMaintenance_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") // Register the service. 
- service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil) - if _, err := a.srv.AgentServiceMaintenance(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=root", nil) - if _, err := a.srv.AgentServiceMaintenance(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -4706,9 +4898,7 @@ func TestAgent_NodeMaintenance_BadRequest(t *testing.T) { // Fails when no enable flag provided req, _ := http.NewRequest("PUT", "/v1/agent/maintenance", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 400 { t.Fatalf("expected 400, got %d", resp.Code) } @@ -4727,9 +4917,7 @@ func TestAgent_NodeMaintenance_Enable(t *testing.T) { // Force the node into maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=true&reason=broken&token=mytoken", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { 
t.Fatalf("expected 200, got %d", resp.Code) } @@ -4767,9 +4955,7 @@ func TestAgent_NodeMaintenance_Disable(t *testing.T) { // Leave maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=false", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { t.Fatalf("expected 200, got %d", resp.Code) } @@ -4792,16 +4978,16 @@ func TestAgent_NodeMaintenance_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=true&reason=broken", nil) - if _, err := a.srv.AgentNodeMaintenance(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=true&reason=broken&token=root", nil) - if _, err := a.srv.AgentNodeMaintenance(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -4825,8 +5011,10 @@ func TestAgent_RegisterCheck_Service(t *testing.T) { // First register the service req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, req); err != nil { - t.Fatalf("err: %v", err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if resp.Code != 200 { + t.Fatalf("expected 200, got %d", resp.Code) } // Now register an additional check @@ -4836,8 +5024,10 @@ func TestAgent_RegisterCheck_Service(t *testing.T) { TTL: 15 * time.Second, } req, _ = http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(checkArgs)) - if _, err := a.srv.AgentRegisterCheck(nil, req); err != nil { - t.Fatalf("err: %v", err) + resp = 
httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if resp.Code != 200 { + t.Fatalf("expected 200, got %d", resp.Code) } // Ensure we have a check mapping @@ -4874,20 +5064,14 @@ func TestAgent_Monitor(t *testing.T) { // Try passing an invalid log level req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentMonitor(resp, req) - if err == nil { - t.Fatal("expected BadRequestError to have occurred, got nil") - } - - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - if _, ok := err.(BadRequestError); !ok { - t.Fatalf("expected BadRequestError to have occurred, got %#v", err) + a.srv.h.ServeHTTP(resp, req) + if http.StatusBadRequest != resp.Code { + t.Fatalf("expected 400 but got %v", resp.Code) } substring := "Unknown log level" - if !strings.Contains(err.Error(), substring) { - t.Fatalf("got: %s, wanted message containing: %s", err.Error(), substring) + if !strings.Contains(resp.Body.String(), substring) { + t.Fatalf("got: %s, wanted message containing: %s", resp.Body.String(), substring) } }) @@ -4899,10 +5083,10 @@ func TestAgent_Monitor(t *testing.T) { req = req.WithContext(cancelCtx) resp := httptest.NewRecorder() - errCh := make(chan error) + codeCh := make(chan int) go func() { - _, err := a.srv.AgentMonitor(resp, req) - errCh <- err + a.srv.h.ServeHTTP(resp, req) + codeCh <- resp.Code }() args := &structs.ServiceDefinition{ @@ -4914,8 +5098,10 @@ func TestAgent_Monitor(t *testing.T) { } registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil { - t.Fatalf("err: %v", err) + res := httptest.NewRecorder() + a.srv.h.ServeHTTP(res, registerReq) + if http.StatusOK != res.Code { + t.Fatalf("expected 200 but got %v", res.Code) } // Wait until we have received some type of logging output @@ -4924,9 +5110,8 @@ func 
TestAgent_Monitor(t *testing.T) { }, 3*time.Second, 100*time.Millisecond) cancelFunc() - err := <-errCh - require.NoError(t, err) - + code := <-codeCh + require.Equal(t, http.StatusOK, code) got := resp.Body.String() // Only check a substring that we are highly confident in finding @@ -4963,8 +5148,10 @@ func TestAgent_Monitor(t *testing.T) { } registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil { - t.Fatalf("err: %v", err) + res := httptest.NewRecorder() + a.srv.h.ServeHTTP(res, registerReq) + if http.StatusOK != res.Code { + t.Fatalf("expected 200 but got %v", res.Code) } // Wait until we have received some type of logging output @@ -4983,10 +5170,10 @@ func TestAgent_Monitor(t *testing.T) { req = req.WithContext(cancelCtx) resp := httptest.NewRecorder() - errCh := make(chan error) + codeCh := make(chan int) go func() { - _, err := a.srv.AgentMonitor(resp, req) - errCh <- err + a.srv.h.ServeHTTP(resp, req) + codeCh <- resp.Code }() args := &structs.ServiceDefinition{ @@ -4998,8 +5185,10 @@ func TestAgent_Monitor(t *testing.T) { } registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil { - t.Fatalf("err: %v", err) + res := httptest.NewRecorder() + a.srv.h.ServeHTTP(res, registerReq) + if http.StatusOK != res.Code { + t.Fatalf("expected 200 but got %v", res.Code) } // Wait until we have received some type of logging output @@ -5008,8 +5197,8 @@ func TestAgent_Monitor(t *testing.T) { }, 3*time.Second, 100*time.Millisecond) cancelFunc() - err := <-errCh - require.NoError(t, err) + code := <-codeCh + require.Equal(t, http.StatusOK, code) // Each line is output as a separate JSON object, we grab the first and // make sure it can be unmarshalled. 
@@ -5028,12 +5217,12 @@ func TestAgent_Monitor(t *testing.T) { req = req.WithContext(cancelCtx) resp := httptest.NewRecorder() - chErr := make(chan error) + codeCh := make(chan int) chStarted := make(chan struct{}) go func() { close(chStarted) - _, err := a.srv.AgentMonitor(resp, req) - chErr <- err + a.srv.h.ServeHTTP(resp, req) + codeCh <- resp.Code }() <-chStarted @@ -5045,8 +5234,8 @@ func TestAgent_Monitor(t *testing.T) { }, 3*time.Second, 100*time.Millisecond) cancelFunc() - err := <-chErr - require.NoError(t, err) + code := <-codeCh + require.Equal(t, http.StatusOK, code) got := resp.Body.String() want := "serf: Shutdown without a Leave" @@ -5068,8 +5257,10 @@ func TestAgent_Monitor_ACLDeny(t *testing.T) { // Try without a token. req, _ := http.NewRequest("GET", "/v1/agent/monitor", nil) - if _, err := a.srv.AgentMonitor(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusForbidden != resp.Code { + t.Fatalf("expected 403 but got %v", resp.Code) } // This proves we call the ACL function, and we've got the other monitor @@ -5099,11 +5290,12 @@ func TestAgent_TokenTriggersFullSync(t *testing.T) { require.NoError(t, err) resp := httptest.NewRecorder() - obj, err := a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) - policy, ok := obj.(*structs.ACLPolicy) - require.True(t, ok) + dec := json.NewDecoder(resp.Body) + policy = &structs.ACLPolicy{} + require.NoError(t, dec.Decode(policy)) return policy } @@ -5121,11 +5313,12 @@ func TestAgent_TokenTriggersFullSync(t *testing.T) { require.NoError(t, err) resp := httptest.NewRecorder() - obj, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) - token, ok := obj.(*structs.ACLToken) - require.True(t, ok) + dec := json.NewDecoder(resp.Body) + token = 
&structs.ACLToken{} + require.NoError(t, dec.Decode(token)) return token } @@ -5156,9 +5349,15 @@ func TestAgent_TokenTriggersFullSync(t *testing.T) { t.Run(tt.path, func(t *testing.T) { url := fmt.Sprintf("/v1/agent/token/%s?token=root", tt.path) - a := NewTestAgent(t, TestACLConfig()+` + a := NewTestAgent(t, ` + primary_datacenter = "dc1" + acl { + enabled = true + default_policy = "deny" + tokens { + initial_management = "root" default = "" agent = "" agent_master = "" @@ -5198,9 +5397,15 @@ func TestAgent_Token(t *testing.T) { // The behavior of this handler when ACLs are disabled is vetted over // in TestACL_Disabled_Response since there's already good infra set // up over there to test this, and it calls the common function. - a := NewTestAgent(t, TestACLConfig()+` + a := NewTestAgent(t, ` + primary_datacenter = "dc1" + acl { + enabled = true + default_policy = "deny" + tokens { + initial_management = "root" default = "" agent = "" agent_master = "" @@ -5225,7 +5430,7 @@ func TestAgent_Token(t *testing.T) { resetTokens := func(init tokens) { a.tokens.UpdateUserToken(init.user, init.userSource) a.tokens.UpdateAgentToken(init.agent, init.agentSource) - a.tokens.UpdateAgentMasterToken(init.master, init.masterSource) + a.tokens.UpdateAgentRecoveryToken(init.master, init.masterSource) a.tokens.UpdateReplicationToken(init.repl, init.replSource) } @@ -5311,7 +5516,7 @@ func TestAgent_Token(t *testing.T) { effective: tokens{master: "M"}, }, { - name: "set master ", + name: "set master", method: "PUT", url: "agent_master?token=root", body: body("M"), @@ -5319,6 +5524,15 @@ func TestAgent_Token(t *testing.T) { raw: tokens{master: "M", masterSource: tokenStore.TokenSourceAPI}, effective: tokens{master: "M"}, }, + { + name: "set recovery", + method: "PUT", + url: "agent_recovery?token=root", + body: body("R"), + code: http.StatusOK, + raw: tokens{master: "R", masterSource: tokenStore.TokenSourceAPI}, + effective: tokens{master: "R", masterSource: 
tokenStore.TokenSourceAPI}, + }, { name: "set repl legacy", method: "PUT", @@ -5391,6 +5605,15 @@ func TestAgent_Token(t *testing.T) { init: tokens{master: "M"}, raw: tokens{masterSource: tokenStore.TokenSourceAPI}, }, + { + name: "clear recovery", + method: "PUT", + url: "agent_recovery?token=root", + body: body(""), + code: http.StatusOK, + init: tokens{master: "R"}, + raw: tokens{masterSource: tokenStore.TokenSourceAPI}, + }, { name: "clear repl legacy", method: "PUT", @@ -5425,7 +5648,7 @@ func TestAgent_Token(t *testing.T) { } require.Equal(t, tt.effective.user, a.tokens.UserToken()) require.Equal(t, tt.effective.agent, a.tokens.AgentToken()) - require.Equal(t, tt.effective.master, a.tokens.AgentMasterToken()) + require.Equal(t, tt.effective.master, a.tokens.AgentRecoveryToken()) require.Equal(t, tt.effective.repl, a.tokens.ReplicationToken()) tok, src := a.tokens.UserTokenAndSource() @@ -5436,7 +5659,7 @@ func TestAgent_Token(t *testing.T) { require.Equal(t, tt.raw.agent, tok) require.Equal(t, tt.raw.agentSource, src) - tok, src = a.tokens.AgentMasterTokenAndSource() + tok, src = a.tokens.AgentRecoveryTokenAndSource() require.Equal(t, tt.raw.master, tok) require.Equal(t, tt.raw.masterSource, src) @@ -5473,9 +5696,9 @@ func TestAgentConnectCARoots_empty(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectCARoots(resp, req) - require.Error(err) - require.Contains(err.Error(), "Connect must be enabled") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusInternalServerError, resp.Code) + require.Contains(resp.Body.String(), "Connect must be enabled") } func TestAgentConnectCARoots_list(t *testing.T) { @@ -5498,10 +5721,12 @@ func TestAgentConnectCARoots_list(t *testing.T) { // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCARoots(resp, req) - require.NoError(err) + 
a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + value := &structs.IndexedCARoots{} + require.NoError(dec.Decode(value)) - value := obj.(structs.IndexedCARoots) assert.Equal(value.ActiveRootID, ca2.ID) // Would like to assert that it's the same as the TestAgent domain but the // only way to access that state via this package is by RPC to the server @@ -5521,9 +5746,12 @@ func TestAgentConnectCARoots_list(t *testing.T) { { // List it again resp2 := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCARoots(resp2, req) - require.NoError(err) - assert.Equal(obj, obj2) + a.srv.h.ServeHTTP(resp2, req) + + dec := json.NewDecoder(resp2.Body) + value2 := &structs.IndexedCARoots{} + require.NoError(dec.Decode(value2)) + assert.Equal(value, value2) // Should cache hit this time and not make request assert.Equal("HIT", resp2.Header().Get("X-Cache")) @@ -5537,10 +5765,11 @@ func TestAgentConnectCARoots_list(t *testing.T) { retry.Run(t, func(r *retry.R) { // List it again resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCARoots(resp, req) - r.Check(err) + a.srv.h.ServeHTTP(resp, req) - value := obj.(structs.IndexedCARoots) + dec := json.NewDecoder(resp.Body) + value := &structs.IndexedCARoots{} + require.NoError(dec.Decode(value)) if ca.ID != value.ActiveRootID { r.Fatalf("%s != %s", ca.ID, value.ActiveRootID) } @@ -5588,16 +5817,14 @@ func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectCALeafCert(resp, req) - require.Error(err) - require.True(acl.IsErrPermissionDenied(err)) + a.srv.h.ServeHTTP(resp, req) + 
require.Equal(http.StatusForbidden, resp.Code) } func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { @@ -5628,8 +5855,7 @@ func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } @@ -5637,12 +5863,13 @@ func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) // Get the issued cert - _, ok := obj.(*structs.IssuedCert) - require.True(ok) + dec := json.NewDecoder(resp.Body) + value := &structs.IssuedCert{} + require.NoError(dec.Decode(value)) + require.NotNil(value) } func createACLTokenWithServicePolicy(t *testing.T, srv *HTTPHandlers, policy string) string { @@ -5663,10 +5890,11 @@ func createACLTokenWithServicePolicy(t *testing.T, srv *HTTPHandlers, policy str req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err := srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - svcToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) + srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + svcToken := &structs.ACLToken{} + require.NoError(t, dec.Decode(svcToken)) return svcToken.SecretID } @@ -5698,8 +5926,7 @@ func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", 
resp.Body.String()) } @@ -5707,9 +5934,8 @@ func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectCALeafCert(resp, req) - require.Error(err) - require.True(acl.IsErrPermissionDenied(err)) + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusForbidden, resp.Code) } func TestAgentConnectCALeafCert_good(t *testing.T) { @@ -5747,8 +5973,7 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -5757,13 +5982,13 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, ca1) @@ -5779,34 +6004,81 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) - - // Should cache hit this time and not make request - require.Equal("HIT", resp.Header().Get("X-Cache")) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } + // Set a 
new CA + ca2 := connect.TestCAConfigSet(t, a, nil) + // Issue a blocking query to ensure that the cert gets updated appropriately { - // Set a new CA - ca := connect.TestCAConfigSet(t, a, nil) - resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+index, nil) - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - issued2 := obj.(*structs.IssuedCert) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) require.NotEqual(issued.CertPEM, issued2.CertPEM) require.NotEqual(issued.PrivateKeyPEM, issued2.PrivateKeyPEM) // Verify that the cert is signed by the new CA - requireLeafValidUnderCA(t, issued2, ca) + requireLeafValidUnderCA(t, issued2, ca2) // Should not be a cache hit! The data was updated in response to the blocking // query being made. require.Equal("MISS", resp.Header().Get("X-Cache")) } + + t.Run("test non-blocking queries update leaf cert", func(t *testing.T) { + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCALeafCert(resp, req) + require.NoError(err) + + // Get the issued cert + issued, ok := obj.(*structs.IssuedCert) + assert.True(ok) + + // Verify that the cert is signed by the CA + requireLeafValidUnderCA(t, issued, ca2) + + // Issue a non blocking query to ensure that the cert gets updated appropriately + { + // Set a new CA + ca3 := connect.TestCAConfigSet(t, a, nil) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) + require.NoError(err) + obj, err = a.srv.AgentConnectCALeafCert(resp, req) + require.NoError(err) + issued2 := obj.(*structs.IssuedCert) + require.NotEqual(issued.CertPEM, issued2.CertPEM) + require.NotEqual(issued.PrivateKeyPEM, issued2.PrivateKeyPEM) + + // Verify that the cert is signed by the new CA + requireLeafValidUnderCA(t, issued2, ca3) + + // Should not be a cache hit! 
+ require.Equal("MISS", resp.Header().Get("X-Cache")) + } + + // Test caching for the leaf cert + { + + for fetched := 0; fetched < 4; fetched++ { + + // Fetch it again + resp := httptest.NewRecorder() + obj2, err := a.srv.AgentConnectCALeafCert(resp, req) + require.NoError(err) + require.Equal(obj, obj2) + } + } + }) } // Test we can request a leaf cert for a service we have permission for @@ -5846,8 +6118,7 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.CatalogRegister(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -5856,13 +6127,13 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, ca1) @@ -5876,12 +6147,11 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) - - // Should cache hit this time and not make request - require.Equal("HIT", resp.Header().Get("X-Cache")) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Test Blocking - see https://github.com/hashicorp/consul/issues/4462 @@ -5891,7 
+6161,7 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { blockingReq, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/connect/ca/leaf/test?wait=125ms&index=%d", issued.ModifyIndex), nil) doneCh := make(chan struct{}) go func() { - a.srv.AgentConnectCALeafCert(resp, blockingReq) + a.srv.h.ServeHTTP(resp, blockingReq) close(doneCh) }() @@ -5912,10 +6182,11 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { resp := httptest.NewRecorder() // Try and sign again (note no index/wait arg since cache should update in // background even if we aren't actively blocking) - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - r.Check(err) + a.srv.h.ServeHTTP(resp, req) - issued2 := obj.(*structs.IssuedCert) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) if issued.CertPEM == issued2.CertPEM { r.Fatalf("leaf has not updated") } @@ -5929,12 +6200,7 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { // Verify that the cert is signed by the new CA requireLeafValidUnderCA(t, issued2, ca) - // Should be a cache hit! The data should've updated in the cache - // in the background so this should've been fetched directly from - // the cache. 
- if resp.Header().Get("X-Cache") != "HIT" { - r.Fatalf("should be a cache hit") - } + require.NotEqual(issued, issued2) }) } } @@ -5996,8 +6262,7 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -6006,13 +6271,13 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, ca1) @@ -6026,12 +6291,11 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) - - // Should cache hit this time and not make request - require.Equal("HIT", resp.Header().Get("X-Cache")) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Test that we aren't churning leaves for no reason at idle. 
@@ -6040,11 +6304,17 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) go func() { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - if err != nil { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { + ch <- fmt.Errorf(resp.Body.String()) + return + } + + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + if err := dec.Decode(issued2); err != nil { ch <- err } else { - issued2 := obj.(*structs.IssuedCert) if issued.CertPEM == issued2.CertPEM { ch <- fmt.Errorf("leaf woke up unexpectedly with same cert") } else { @@ -6054,7 +6324,6 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) }() start := time.Now() - select { case <-time.After(5 * time.Second): case err := <-ch: @@ -6130,23 +6399,24 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a2.srv.AgentRegisterService(resp, req) - require.NoError(err) + a2.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } } // List - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) - resp := httptest.NewRecorder() - obj, err := a2.srv.AgentConnectCALeafCert(resp, req) + req, err := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) require.NoError(err) + resp := httptest.NewRecorder() + a2.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusOK, resp.Code) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA 
requireLeafValidUnderCA(t, issued, dc1_ca1) @@ -6160,12 +6430,11 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a2.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) - - // Should cache hit this time and not make request - require.Equal("HIT", resp.Header().Get("X-Cache")) + a2.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Test that we aren't churning leaves for no reason at idle. @@ -6174,11 +6443,17 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { go func() { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil) resp := httptest.NewRecorder() - obj, err := a2.srv.AgentConnectCALeafCert(resp, req) - if err != nil { + a2.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { + ch <- fmt.Errorf(resp.Body.String()) + return + } + + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + if err := dec.Decode(issued2); err != nil { ch <- err } else { - issued2 := obj.(*structs.IssuedCert) if issued.CertPEM == issued2.CertPEM { ch <- fmt.Errorf("leaf woke up unexpectedly with same cert") } else { @@ -6213,10 +6488,12 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { resp := httptest.NewRecorder() // Try and sign again (note no index/wait arg since cache should update in // background even if we aren't actively blocking) - obj, err := a2.srv.AgentConnectCALeafCert(resp, req) - r.Check(err) + a2.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusOK, resp.Code) - issued2 := obj.(*structs.IssuedCert) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) if issued.CertPEM == issued2.CertPEM { r.Fatalf("leaf has not updated") } @@ -6230,12 +6507,7 @@ 
func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { // Verify that the cert is signed by the new CA requireLeafValidUnderCA(t, issued2, dc1_ca2) - // Should be a cache hit! The data should've updated in the cache - // in the background so this should've been fetched directly from - // the cache. - if resp.Header().Get("X-Cache") != "HIT" { - r.Fatalf("should be a cache hit") - } + require.NotEqual(issued, issued2) }) } @@ -6243,15 +6515,14 @@ func waitForActiveCARoot(t *testing.T, srv *HTTPHandlers, expect *structs.CARoot retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() - obj, err := srv.AgentConnectCARoots(resp, req) - if err != nil { - r.Fatalf("err: %v", err) + srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } - roots, ok := obj.(structs.IndexedCARoots) - if !ok { - r.Fatalf("response is wrong type %T", obj) - } + dec := json.NewDecoder(resp.Body) + roots := &structs.IndexedCARoots{} + require.NoError(t, dec.Decode(roots)) var root *structs.CARoot for _, r := range roots.Roots { @@ -6303,12 +6574,9 @@ func TestAgentConnectAuthorize_badBody(t *testing.T) { args := []string{} req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. 
- assert.Contains(err.Error(), "decode failed") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "decode failed") } func TestAgentConnectAuthorize_noTarget(t *testing.T) { @@ -6327,12 +6595,9 @@ func TestAgentConnectAuthorize_noTarget(t *testing.T) { args := &structs.ConnectAuthorizeRequest{} req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - assert.Contains(err.Error(), "Target service must be specified") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "Target service must be specified") } // Client ID is not in the valid URI format @@ -6355,12 +6620,9 @@ func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. 
- assert.Contains(err.Error(), "ClientCertURI not a valid Connect identifier") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "ClientCertURI not a valid Connect identifier") } // Client ID is a valid URI but its not a service URI @@ -6383,12 +6645,9 @@ func TestAgentConnectAuthorize_idNotService(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - assert.Contains(err.Error(), "ClientCertURI not a valid Service identifier") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "ClientCertURI not a valid Service identifier") } // Test when there is an intention allowing the connection @@ -6429,12 +6688,13 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Nil(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code) require.Equal("MISS", resp.Header().Get("X-Cache")) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") @@ -6442,11 +6702,12 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Nil(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := 
json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") @@ -6478,11 +6739,12 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Nil(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.False(obj.Authorized) require.Contains(obj.Reason, "Matched") @@ -6530,11 +6792,12 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) assert.False(obj.Authorized) assert.Contains(obj.Reason, "Matched") } @@ -6585,11 +6848,12 @@ func TestAgentConnectAuthorize_allowTrustDomain(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") } @@ -6652,11 +6916,12 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := 
httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) assert.True(obj.Authorized) assert.Contains(obj.Reason, "Matched") } @@ -6669,11 +6934,12 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) assert.False(obj.Authorized) assert.Contains(obj.Reason, "Matched") } @@ -6701,8 +6967,9 @@ func TestAgentConnectAuthorize_serviceWrite(t *testing.T) { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token="+token, jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectAuthorize(resp, req) - assert.True(acl.IsErrPermissionDenied(err)) + a.srv.h.ServeHTTP(resp, req) + + assert.Equal(http.StatusForbidden, resp.Code) } // Test when no intentions match w/ a default deny policy @@ -6724,11 +6991,12 @@ func TestAgentConnectAuthorize_defaultDeny(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) assert.False(obj.Authorized) assert.Contains(obj.Reason, "Default behavior") } @@ -6759,12 +7027,12 @@ func 
TestAgentConnectAuthorize_defaultAllow(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - assert.NotNil(respRaw) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) assert.True(obj.Authorized) assert.Contains(obj.Reason, "Default behavior") } @@ -6790,6 +7058,7 @@ func TestAgent_Host(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/host?token=master", nil) resp := httptest.NewRecorder() + // TODO: AgentHost should write to response so that we can test using ServeHTTP() respRaw, err := a.srv.AgentHost(resp, req) assert.Nil(err) assert.Equal(http.StatusOK, resp.Code) @@ -6821,10 +7090,10 @@ func TestAgent_HostBadACL(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/host?token=agent", nil) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentHost(resp, req) + // TODO: AgentHost should write to response so that we can test using ServeHTTP() + _, err := a.srv.AgentHost(resp, req) assert.EqualError(err, "ACL not found") assert.Equal(http.StatusOK, resp.Code) - assert.Nil(respRaw) } // Thie tests that a proxy with an ExposeConfig is returned as expected. 
@@ -6861,12 +7130,19 @@ func TestAgent_Services_ExposeConfig(t *testing.T) { a.State.AddService(srv1, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(nil, req) - require.NoError(t, err) - val := obj.(map[string]*api.AgentService) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + dec := json.NewDecoder(resp.Body) + val := make(map[string]*api.AgentService) + require.NoError(t, dec.Decode(&val)) require.Len(t, val, 1) actual := val["proxy-id"] require.NotNil(t, actual) require.Equal(t, api.ServiceKindConnectProxy, actual.Kind) + // Proxy.ToAPI() creates an empty Upstream list instead of keeping nil so do the same with actual. + if actual.Proxy.Upstreams == nil { + actual.Proxy.Upstreams = make([]api.Upstream, 0) + } require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy) } diff --git a/agent/agent_oss.go b/agent/agent_oss.go index fff8ef26e..b9b1f91dc 100644 --- a/agent/agent_oss.go +++ b/agent/agent_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/agent_test.go b/agent/agent_test.go index 6b3ea8cfb..cc3151a82 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -214,10 +214,14 @@ func TestAgent_TokenStore(t *testing.T) { t.Parallel() a := NewTestAgent(t, ` - acl_token = "user" - acl_agent_token = "agent" - acl_agent_master_token = "master"`, - ) + acl { + tokens { + default = "user" + agent = "agent" + agent_recovery = "recovery" + } + } + `) defer a.Shutdown() if got, want := a.tokens.UserToken(), "user"; got != want { @@ -226,7 +230,7 @@ func TestAgent_TokenStore(t *testing.T) { if got, want := a.tokens.AgentToken(), "agent"; got != want { t.Fatalf("got %q want %q", got, want) } - if got, want := a.tokens.IsAgentMasterToken("master"), true; got != want { + if got, want := a.tokens.IsAgentRecoveryToken("recovery"), true; got != want { t.Fatalf("got %v want %v", got, want) } } @@ -295,10 +299,6 @@ func 
TestAgent_HTTPMaxHeaderBytes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ports, err := freeport.Take(1) - require.NoError(t, err) - t.Cleanup(func() { freeport.Return(ports) }) - caConfig := tlsutil.Config{} tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil)) require.NoError(t, err) @@ -312,7 +312,7 @@ func TestAgent_HTTPMaxHeaderBytes(t *testing.T) { }, RuntimeConfig: &config.RuntimeConfig{ HTTPAddrs: []net.Addr{ - &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]}, + &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: freeport.GetOne(t)}, }, HTTPMaxHeaderBytes: tt.maxHeaderBytes, }, @@ -1738,14 +1738,12 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) { a := StartTestAgent(t, TestAgent{HCL: cfg}) defer a.Shutdown() - testCtx, testCancel := context.WithCancel(context.Background()) - defer testCancel() - - testHTTPServer, returnPort := launchHTTPCheckServer(t, testCtx) - defer func() { - testHTTPServer.Close() - returnPort() - }() + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("OK\n")) + }) + testHTTPServer := httptest.NewServer(handler) + t.Cleanup(testHTTPServer.Close) registerServicesAndChecks := func(t *testing.T, a *TestAgent) { // add one persistent service with a simple check @@ -1850,29 +1848,6 @@ node_name = "` + a.Config.NodeName + `" } } -func launchHTTPCheckServer(t *testing.T, ctx context.Context) (srv *httptest.Server, returnPortsFn func()) { - ports := freeport.MustTake(1) - port := ports[0] - - addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(port)) - - var lc net.ListenConfig - listener, err := lc.Listen(ctx, "tcp", addr) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("OK\n")) - }) - - srv = &httptest.Server{ - Listener: listener, - Config: &http.Server{Handler: handler}, - } - 
srv.Start() - return srv, func() { freeport.Return(ports) } -} - func TestAgent_AddCheck_Alias(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -4708,14 +4683,12 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) { t.Parallel() - gwPort := freeport.MustTake(1) - defer freeport.Return(gwPort) - gwAddr := ipaddr.FormatAddressPort("127.0.0.1", gwPort[0]) + port := freeport.GetOne(t) + gwAddr := ipaddr.FormatAddressPort("127.0.0.1", port) // Due to some ordering, we'll have to manually configure these ports in // advance. - secondaryRPCPorts := freeport.MustTake(2) - defer freeport.Return(secondaryRPCPorts) + secondaryRPCPorts := freeport.GetN(t, 2) a1 := StartTestAgent(t, TestAgent{Name: "bob", HCL: ` domain = "consul" @@ -4769,7 +4742,7 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) { ID: "mesh-gateway", Name: "mesh-gateway", Meta: map[string]string{structs.MetaWANFederationKey: "1"}, - Port: gwPort[0], + Port: port, } req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) require.NoError(t, err) @@ -4883,7 +4856,7 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) { ID: "mesh-gateway", Name: "mesh-gateway", Meta: map[string]string{structs.MetaWANFederationKey: "1"}, - Port: gwPort[0], + Port: port, } req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) require.NoError(t, err) @@ -4898,7 +4871,7 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) { ID: "mesh-gateway", Name: "mesh-gateway", Meta: map[string]string{structs.MetaWANFederationKey: "1"}, - Port: gwPort[0], + Port: port, } req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) require.NoError(t, err) @@ -5068,7 +5041,7 @@ func TestAutoConfig_Integration(t *testing.T) { srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig}) defer srv.Shutdown() - testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + 
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) // sign a JWT token now := time.Now() @@ -5115,7 +5088,10 @@ func TestAutoConfig_Integration(t *testing.T) { // when this is successful we managed to get the gossip key and serf addresses to bind to // and then connect. Additionally we would have to have certificates or else the // verify_incoming config on the server would not let it work. - testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) + + // spot check that we now have an ACL token + require.NotEmpty(t, client.tokens.AgentToken()) // grab the existing cert cert1 := client.Agent.tlsConfigurator.Cert() @@ -5126,7 +5102,7 @@ func TestAutoConfig_Integration(t *testing.T) { ca := connect.TestCA(t, nil) req := &structs.CARequest{ Datacenter: "dc1", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, Config: &structs.CAConfiguration{ Provider: "consul", Config: map[string]interface{}{ @@ -5159,9 +5135,6 @@ func TestAutoConfig_Integration(t *testing.T) { require.NoError(r, err) require.Equal(r, client.Agent.tlsConfigurator.Cert(), &actual) }) - - // spot check that we now have an ACL token - require.NotEmpty(t, client.tokens.AgentToken()) } func TestAgent_AutoEncrypt(t *testing.T) { @@ -5201,7 +5174,7 @@ func TestAgent_AutoEncrypt(t *testing.T) { srv := StartTestAgent(t, TestAgent{Name: "test-server", HCL: hclConfig}) defer srv.Shutdown() - testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) client := StartTestAgent(t, TestAgent{Name: "test-client", HCL: TestACLConfigWithParams(nil) + ` bootstrap = false @@ -5224,7 +5197,7 @@ func 
TestAgent_AutoEncrypt(t *testing.T) { // when this is successful we managed to get a TLS certificate and are using it for // encrypted RPC connections. - testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) // now we need to validate that our certificate has the correct CN aeCert := client.tlsConfigurator.Cert() @@ -5281,10 +5254,7 @@ func TestAgent_ListenHTTP_MultipleAddresses(t *testing.T) { t.Skip("too slow for testing.Short") } - ports, err := freeport.Take(2) - require.NoError(t, err) - t.Cleanup(func() { freeport.Return(ports) }) - + ports := freeport.GetN(t, 2) caConfig := tlsutil.Config{} tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil)) require.NoError(t, err) @@ -5351,3 +5321,10 @@ func uniqueAddrs(srvs []apiServer) map[string]struct{} { } return result } + +func runStep(t *testing.T, name string, fn func(t *testing.T)) { + t.Helper() + if !t.Run(name, fn) { + t.FailNow() + } +} diff --git a/agent/auto-config/auto_config.go b/agent/auto-config/auto_config.go index f3eedb7eb..631ccc75d 100644 --- a/agent/auto-config/auto_config.go +++ b/agent/auto-config/auto_config.go @@ -279,6 +279,7 @@ func (ac *AutoConfig) getInitialConfigurationOnce(ctx context.Context, csr strin Datacenter: ac.config.Datacenter, Node: ac.config.NodeName, Segment: ac.config.SegmentName, + Partition: ac.config.PartitionOrEmpty(), JWT: token, CSR: csr, } diff --git a/agent/auto-config/auto_config_oss.go b/agent/auto-config/auto_config_oss.go index 1ce93c6e7..95b38aa05 100644 --- a/agent/auto-config/auto_config_oss.go +++ b/agent/auto-config/auto_config_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package autoconf diff --git a/agent/auto-config/auto_config_oss_test.go b/agent/auto-config/auto_config_oss_test.go index 2d6092f37..6a318644f 100644 --- a/agent/auto-config/auto_config_oss_test.go +++ 
b/agent/auto-config/auto_config_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package autoconf diff --git a/agent/auto-config/config_oss.go b/agent/auto-config/config_oss.go index 876d090d5..a8048954a 100644 --- a/agent/auto-config/config_oss.go +++ b/agent/auto-config/config_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package autoconf diff --git a/agent/auto-config/config_translate.go b/agent/auto-config/config_translate.go index 829bd6b1e..0c8939c55 100644 --- a/agent/auto-config/config_translate.go +++ b/agent/auto-config/config_translate.go @@ -26,9 +26,12 @@ func translateConfig(c *pbconfig.Config) config.Config { Datacenter: stringPtrOrNil(c.Datacenter), PrimaryDatacenter: stringPtrOrNil(c.PrimaryDatacenter), NodeName: stringPtrOrNil(c.NodeName), - // only output the SegmentName in the configuration if its non-empty + // only output the SegmentName in the configuration if it's non-empty // this will avoid a warning later when parsing the persisted configuration SegmentName: stringPtrOrNil(c.SegmentName), + // only output the Partition in the configuration if it's non-empty + // this will avoid a warning later when parsing the persisted configuration + Partition: stringPtrOrNil(c.Partition), } if a := c.AutoEncrypt; a != nil { @@ -62,9 +65,9 @@ func translateConfig(c *pbconfig.Config) config.Config { } result.ACL.Tokens = config.Tokens{ - Master: stringPtrOrNil(t.Master), + InitialManagement: stringPtrOrNil(t.InitialManagement), + AgentRecovery: stringPtrOrNil(t.AgentRecovery), Replication: stringPtrOrNil(t.Replication), - AgentMaster: stringPtrOrNil(t.AgentMaster), Default: stringPtrOrNil(t.Default), Agent: stringPtrOrNil(t.Agent), ManagedServiceProvider: tokens, diff --git a/agent/auto-config/config_translate_test.go b/agent/auto-config/config_translate_test.go index 0bdbec0bc..c70ae2087 100644 --- a/agent/auto-config/config_translate_test.go +++ b/agent/auto-config/config_translate_test.go @@ -69,11 +69,11 @@ 
func TestTranslateConfig(t *testing.T) { EnableTokenPersistence: true, MSPDisableBootstrap: false, Tokens: &pbconfig.ACLTokens{ - Master: "99e7e490-6baf-43fc-9010-78b6aa9a6813", - Replication: "51308d40-465c-4ac6-a636-7c0747edec89", - AgentMaster: "e012e1ea-78a2-41cc-bc8b-231a44196f39", - Default: "8781a3f5-de46-4b45-83e1-c92f4cfd0332", - Agent: "ddb8f1b0-8a99-4032-b601-87926bce244e", + InitialManagement: "99e7e490-6baf-43fc-9010-78b6aa9a6813", + Replication: "51308d40-465c-4ac6-a636-7c0747edec89", + AgentRecovery: "e012e1ea-78a2-41cc-bc8b-231a44196f39", + Default: "8781a3f5-de46-4b45-83e1-c92f4cfd0332", + Agent: "ddb8f1b0-8a99-4032-b601-87926bce244e", ManagedServiceProvider: []*pbconfig.ACLServiceProviderToken{ { AccessorID: "23f37987-7b9e-4e5b-acae-dbc9bc137bae", @@ -129,11 +129,11 @@ func TestTranslateConfig(t *testing.T) { EnableKeyListPolicy: boolPointer(true), EnableTokenPersistence: boolPointer(true), Tokens: config.Tokens{ - Master: stringPointer("99e7e490-6baf-43fc-9010-78b6aa9a6813"), - Replication: stringPointer("51308d40-465c-4ac6-a636-7c0747edec89"), - AgentMaster: stringPointer("e012e1ea-78a2-41cc-bc8b-231a44196f39"), - Default: stringPointer("8781a3f5-de46-4b45-83e1-c92f4cfd0332"), - Agent: stringPointer("ddb8f1b0-8a99-4032-b601-87926bce244e"), + InitialManagement: stringPointer("99e7e490-6baf-43fc-9010-78b6aa9a6813"), + AgentRecovery: stringPointer("e012e1ea-78a2-41cc-bc8b-231a44196f39"), + Replication: stringPointer("51308d40-465c-4ac6-a636-7c0747edec89"), + Default: stringPointer("8781a3f5-de46-4b45-83e1-c92f4cfd0332"), + Agent: stringPointer("ddb8f1b0-8a99-4032-b601-87926bce244e"), ManagedServiceProvider: []config.ServiceProviderToken{ { AccessorID: stringPointer("23f37987-7b9e-4e5b-acae-dbc9bc137bae"), diff --git a/agent/auto-config/mock_oss_test.go b/agent/auto-config/mock_oss_test.go index da227484f..0518753bb 100644 --- a/agent/auto-config/mock_oss_test.go +++ b/agent/auto-config/mock_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // 
+build !consulent package autoconf diff --git a/agent/auto-config/tls.go b/agent/auto-config/tls.go index ab647b515..bf88b41bd 100644 --- a/agent/auto-config/tls.go +++ b/agent/auto-config/tls.go @@ -192,11 +192,12 @@ func (ac *AutoConfig) caRootsRequest() structs.DCSpecificRequest { func (ac *AutoConfig) leafCertRequest() cachetype.ConnectCALeafRequest { return cachetype.ConnectCALeafRequest{ - Datacenter: ac.config.Datacenter, - Agent: ac.config.NodeName, - DNSSAN: ac.getDNSSANs(), - IPSAN: ac.getIPSANs(), - Token: ac.acConfig.Tokens.AgentToken(), + Datacenter: ac.config.Datacenter, + Agent: ac.config.NodeName, + DNSSAN: ac.getDNSSANs(), + IPSAN: ac.getIPSANs(), + Token: ac.acConfig.Tokens.AgentToken(), + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(ac.config.PartitionOrEmpty()), } } diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index 9a7fcd216..1950ef756 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -380,6 +380,25 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache return c.generateNewLeaf(reqReal, lastResultWithNewState()) } + // If we called Fetch() with MustRevalidate then this call came from a non-blocking query. + // Any prior CA rotations should've already expired the cert. + // All we need to do is check whether the current CA is the one that signed the leaf. If not, generate a new leaf. 
+ // This is not a perfect solution (as a CA rotation update can be missed) but it should take care of instances like + // see https://github.com/hashicorp/consul/issues/10871, https://github.com/hashicorp/consul/issues/9862 + // This seems to me like a hack, so maybe we can revisit the caching/ fetching logic in this case + if req.CacheInfo().MustRevalidate { + roots, err := c.rootsFromCache() + if err != nil { + return lastResultWithNewState(), err + } + if activeRootHasKey(roots, state.authorityKeyID) { + return lastResultWithNewState(), nil + } + + // if we reach here then the current leaf was not signed by the same CAs, just regen + return c.generateNewLeaf(reqReal, lastResultWithNewState()) + } + // We are about to block and wait for a change or timeout. // Make a chan we can be notified of changes to CA roots on. It must be @@ -401,7 +420,7 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache c.fetchStart(rootUpdateCh) defer c.fetchDone(rootUpdateCh) - // Setup the timeout chan outside the loop so we don't keep bumping the timout + // Setup the timeout chan outside the loop so we don't keep bumping the timeout // later if we loop around. timeoutCh := time.After(opts.Timeout) @@ -492,7 +511,7 @@ func (c *ConnectCALeaf) rootsFromCache() (*structs.IndexedCARoots, error) { // generateNewLeaf does the actual work of creating a new private key, // generating a CSR and getting it signed by the servers. result argument -// represents the last result currently in cache if any along with it's state. +// represents the last result currently in cache if any along with its state. func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest, result cache.FetchResult) (cache.FetchResult, error) { @@ -643,14 +662,15 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest, // since this is only used for cache-related requests and not forwarded // directly to any Consul servers. 
type ConnectCALeafRequest struct { - Token string - Datacenter string - Service string // Service name, not ID - Agent string // Agent name, not ID - DNSSAN []string - IPSAN []net.IP - MinQueryIndex uint64 - MaxQueryTime time.Duration + Token string + Datacenter string + Service string // Service name, not ID + Agent string // Agent name, not ID + DNSSAN []string + IPSAN []net.IP + MinQueryIndex uint64 + MaxQueryTime time.Duration + MustRevalidate bool structs.EnterpriseMeta } @@ -684,10 +704,11 @@ func (req *ConnectCALeafRequest) TargetPartition() string { func (r *ConnectCALeafRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{ - Token: r.Token, - Key: r.Key(), - Datacenter: r.Datacenter, - MinIndex: r.MinQueryIndex, - Timeout: r.MaxQueryTime, + Token: r.Token, + Key: r.Key(), + Datacenter: r.Datacenter, + MinIndex: r.MinQueryIndex, + Timeout: r.MaxQueryTime, + MustRevalidate: r.MustRevalidate, } } diff --git a/agent/cache-types/connect_ca_leaf_oss.go b/agent/cache-types/connect_ca_leaf_oss.go index 2045e85ee..07de1e793 100644 --- a/agent/cache-types/connect_ca_leaf_oss.go +++ b/agent/cache-types/connect_ca_leaf_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package cachetype diff --git a/agent/cache-types/norace_test.go b/agent/cache-types/norace_test.go index cac95de9d..93645410d 100644 --- a/agent/cache-types/norace_test.go +++ b/agent/cache-types/norace_test.go @@ -1,3 +1,4 @@ +//go:build !race // +build !race package cachetype diff --git a/agent/cache-types/race_test.go b/agent/cache-types/race_test.go index 29774bf67..7848991f2 100644 --- a/agent/cache-types/race_test.go +++ b/agent/cache-types/race_test.go @@ -1,3 +1,4 @@ +//go:build race // +build race package cachetype diff --git a/agent/catalog_endpoint_oss.go b/agent/catalog_endpoint_oss.go index 361672192..da27ab476 100644 --- a/agent/catalog_endpoint_oss.go +++ b/agent/catalog_endpoint_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent 
diff --git a/agent/checks/docker_unix.go b/agent/checks/docker_unix.go index 0cde4ad54..528cb7d85 100644 --- a/agent/checks/docker_unix.go +++ b/agent/checks/docker_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package checks diff --git a/agent/config/builder.go b/agent/config/builder.go index 46605d50b..a022f8846 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -552,6 +552,9 @@ func (b *builder) build() (rt RuntimeConfig, err error) { // determine client addresses clientAddrs := b.expandIPs("client_addr", c.ClientAddr) + if len(clientAddrs) == 0 { + b.warn("client_addr is empty, client services (DNS, HTTP, HTTPS, GRPC) will not be listening for connections") + } dnsAddrs := b.makeAddrs(b.expandAddrs("addresses.dns", c.Addresses.DNS), clientAddrs, dnsPort) httpAddrs := b.makeAddrs(b.expandAddrs("addresses.http", c.Addresses.HTTP), clientAddrs, httpPort) httpsAddrs := b.makeAddrs(b.expandAddrs("addresses.https", c.Addresses.HTTPS), clientAddrs, httpsPort) @@ -857,18 +860,18 @@ func (b *builder) build() (rt RuntimeConfig, err error) { ACLDefaultPolicy: stringVal(c.ACL.DefaultPolicy), }, - ACLEnableKeyListPolicy: boolVal(c.ACL.EnableKeyListPolicy), - ACLMasterToken: stringVal(c.ACL.Tokens.Master), + ACLEnableKeyListPolicy: boolVal(c.ACL.EnableKeyListPolicy), + ACLInitialManagementToken: stringVal(c.ACL.Tokens.InitialManagement), ACLTokenReplication: boolVal(c.ACL.TokenReplication), ACLTokens: token.Config{ - DataDir: dataDir, - EnablePersistence: boolValWithDefault(c.ACL.EnableTokenPersistence, false), - ACLDefaultToken: stringVal(c.ACL.Tokens.Default), - ACLAgentToken: stringVal(c.ACL.Tokens.Agent), - ACLAgentMasterToken: stringVal(c.ACL.Tokens.AgentMaster), - ACLReplicationToken: stringVal(c.ACL.Tokens.Replication), + DataDir: dataDir, + EnablePersistence: boolValWithDefault(c.ACL.EnableTokenPersistence, false), + ACLDefaultToken: stringVal(c.ACL.Tokens.Default), + ACLAgentToken: stringVal(c.ACL.Tokens.Agent), + 
ACLAgentRecoveryToken: stringVal(c.ACL.Tokens.AgentRecovery), + ACLReplicationToken: stringVal(c.ACL.Tokens.Replication), }, // Autopilot @@ -1091,6 +1094,10 @@ func (b *builder) build() (rt RuntimeConfig, err error) { rt.UseStreamingBackend = boolValWithDefault(c.UseStreamingBackend, true) + if c.RaftBoltDBConfig != nil { + rt.RaftBoltDBConfig = *c.RaftBoltDBConfig + } + if rt.Cache.EntryFetchMaxBurst <= 0 { return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_max_burst must be strictly positive, was: %v", rt.Cache.EntryFetchMaxBurst) } @@ -2371,8 +2378,9 @@ func validateAutoConfigAuthorizer(rt RuntimeConfig) error { // create a blank identity for use to validate the claim assertions. blankID := validator.NewIdentity() varMap := map[string]string{ - "node": "fake", - "segment": "fake", + "node": "fake", + "segment": "fake", + "partition": "fake", } // validate all the claim assertions diff --git a/agent/config/builder_oss.go b/agent/config/builder_oss.go index f78bea300..ce6e8d44c 100644 --- a/agent/config/builder_oss.go +++ b/agent/config/builder_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/builder_oss_test.go b/agent/config/builder_oss_test.go index 3d2b7ba30..2fd5f50ad 100644 --- a/agent/config/builder_oss_test.go +++ b/agent/config/builder_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config @@ -110,7 +111,11 @@ func TestValidateEnterpriseConfigKeys(t *testing.T) { config: Config{ ReadReplica: &boolVal, SegmentName: &stringVal, - ACL: ACL{Tokens: Tokens{AgentMaster: &stringVal}}, + ACL: ACL{ + Tokens: Tokens{ + DeprecatedTokens: DeprecatedTokens{AgentMaster: &stringVal}, + }, + }, }, badKeys: []string{"read_replica (or the deprecated non_voting_server)", "segment"}, }, diff --git a/agent/config/builder_test.go b/agent/config/builder_test.go index 5e3987701..5901431a0 100644 --- a/agent/config/builder_test.go +++ b/agent/config/builder_test.go @@ -247,3 +247,47 @@ 
func TestLoad_HTTPMaxConnsPerClientExceedsRLimit(t *testing.T) { require.Error(t, err) assert.Contains(t, err.Error(), "but limits.http_max_conns_per_client: 16777217 needs at least 16777237") } + +func TestLoad_EmptyClientAddr(t *testing.T) { + + type testCase struct { + name string + clientAddr *string + expectedWarningMessage *string + } + + fn := func(t *testing.T, tc testCase) { + opts := LoadOpts{ + FlagValues: Config{ + ClientAddr: tc.clientAddr, + DataDir: pString("dir"), + }, + } + patchLoadOptsShims(&opts) + result, err := Load(opts) + require.NoError(t, err) + if tc.expectedWarningMessage != nil { + require.Len(t, result.Warnings, 1) + require.Contains(t, result.Warnings[0], *tc.expectedWarningMessage) + } + } + + var testCases = []testCase{ + { + name: "empty string", + clientAddr: pString(""), + expectedWarningMessage: pString("client_addr is empty, client services (DNS, HTTP, HTTPS, GRPC) will not be listening for connections"), + }, + { + name: "nil pointer", + clientAddr: nil, // defaults to 127.0.0.1 + expectedWarningMessage: nil, // expecting no warnings + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fn(t, tc) + }) + } +} diff --git a/agent/config/config.go b/agent/config/config.go index 3b5b417dd..7d8ecadbb 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -4,6 +4,8 @@ import ( "encoding/json" "fmt" + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/hcl" "github.com/mitchellh/mapstructure" @@ -256,6 +258,8 @@ type Config struct { RPC RPC `mapstructure:"rpc"` + RaftBoltDBConfig *consul.RaftBoltDBConfig `mapstructure:"raft_boltdb"` + // UseStreamingBackend instead of blocking queries for service health and // any other endpoints which support streaming. 
UseStreamingBackend *bool `mapstructure:"use_streaming_backend"` @@ -742,14 +746,23 @@ type ACL struct { } type Tokens struct { - Master *string `mapstructure:"master"` - Replication *string `mapstructure:"replication"` - AgentMaster *string `mapstructure:"agent_master"` - Default *string `mapstructure:"default"` - Agent *string `mapstructure:"agent"` + InitialManagement *string `mapstructure:"initial_management"` + Replication *string `mapstructure:"replication"` + AgentRecovery *string `mapstructure:"agent_recovery"` + Default *string `mapstructure:"default"` + Agent *string `mapstructure:"agent"` // Enterprise Only ManagedServiceProvider []ServiceProviderToken `mapstructure:"managed_service_provider"` + + DeprecatedTokens `mapstructure:",squash"` +} + +type DeprecatedTokens struct { + // DEPRECATED (ACL) - renamed to "initial_management" + Master *string `mapstructure:"master"` + // DEPRECATED (ACL) - renamed to "agent_recovery" + AgentMaster *string `mapstructure:"agent_master"` } // ServiceProviderToken groups an accessor and secret for a service provider token. 
Enterprise Only diff --git a/agent/config/config_oss.go b/agent/config/config_oss.go index 95b1599dd..7e061b8e5 100644 --- a/agent/config/config_oss.go +++ b/agent/config/config_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/default_oss.go b/agent/config/default_oss.go index 57f52d927..d98e27bcb 100644 --- a/agent/config/default_oss.go +++ b/agent/config/default_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/deprecated.go b/agent/config/deprecated.go index 11ea57d15..c026b21e0 100644 --- a/agent/config/deprecated.go +++ b/agent/config/deprecated.go @@ -34,11 +34,21 @@ func applyDeprecatedConfig(d *decodeTarget) (Config, []string) { dep := d.DeprecatedConfig var warns []string - if dep.ACLAgentMasterToken != nil { - if d.Config.ACL.Tokens.AgentMaster == nil { - d.Config.ACL.Tokens.AgentMaster = dep.ACLAgentMasterToken + // TODO(boxofrad): The DeprecatedConfig struct only holds fields that were once + // on the top-level Config struct (not nested fields e.g. ACL.Tokens) maybe we + // should rethink this a bit? 
+ if d.Config.ACL.Tokens.AgentMaster != nil { + if d.Config.ACL.Tokens.AgentRecovery == nil { + d.Config.ACL.Tokens.AgentRecovery = d.Config.ACL.Tokens.AgentMaster } - warns = append(warns, deprecationWarning("acl_agent_master_token", "acl.tokens.agent_master")) + warns = append(warns, deprecationWarning("acl.tokens.agent_master", "acl.tokens.agent_recovery")) + } + + if dep.ACLAgentMasterToken != nil { + if d.Config.ACL.Tokens.AgentRecovery == nil { + d.Config.ACL.Tokens.AgentRecovery = dep.ACLAgentMasterToken + } + warns = append(warns, deprecationWarning("acl_agent_master_token", "acl.tokens.agent_recovery")) } if dep.ACLAgentToken != nil { @@ -55,11 +65,18 @@ func applyDeprecatedConfig(d *decodeTarget) (Config, []string) { warns = append(warns, deprecationWarning("acl_token", "acl.tokens.default")) } - if dep.ACLMasterToken != nil { - if d.Config.ACL.Tokens.Master == nil { - d.Config.ACL.Tokens.Master = dep.ACLMasterToken + if d.Config.ACL.Tokens.Master != nil { + if d.Config.ACL.Tokens.InitialManagement == nil { + d.Config.ACL.Tokens.InitialManagement = d.Config.ACL.Tokens.Master } - warns = append(warns, deprecationWarning("acl_master_token", "acl.tokens.master")) + warns = append(warns, deprecationWarning("acl.tokens.master", "acl.tokens.initial_management")) + } + + if dep.ACLMasterToken != nil { + if d.Config.ACL.Tokens.InitialManagement == nil { + d.Config.ACL.Tokens.InitialManagement = dep.ACLMasterToken + } + warns = append(warns, deprecationWarning("acl_master_token", "acl.tokens.initial_management")) } if dep.ACLReplicationToken != nil { diff --git a/agent/config/deprecated_test.go b/agent/config/deprecated_test.go index 98f7fa07a..6cbec5448 100644 --- a/agent/config/deprecated_test.go +++ b/agent/config/deprecated_test.go @@ -15,12 +15,10 @@ data_dir = "/foo" acl_datacenter = "dcone" -acl_agent_master_token = "token1" -acl_agent_token = "token2" -acl_token = "token3" +acl_agent_token = "token1" +acl_token = "token2" -acl_master_token = "token4" 
-acl_replication_token = "token5" +acl_replication_token = "token3" acl_default_policy = "deny" acl_down_policy = "async-cache" @@ -35,13 +33,11 @@ acl_enable_key_list_policy = true require.NoError(t, err) expectWarns := []string{ - deprecationWarning("acl_agent_master_token", "acl.tokens.agent_master"), deprecationWarning("acl_agent_token", "acl.tokens.agent"), deprecationWarning("acl_datacenter", "primary_datacenter"), deprecationWarning("acl_default_policy", "acl.default_policy"), deprecationWarning("acl_down_policy", "acl.down_policy"), deprecationWarning("acl_enable_key_list_policy", "acl.enable_key_list_policy"), - deprecationWarning("acl_master_token", "acl.tokens.master"), deprecationWarning("acl_replication_token", "acl.tokens.replication"), deprecationWarning("acl_token", "acl.tokens.default"), deprecationWarning("acl_ttl", "acl.token_ttl"), @@ -55,11 +51,9 @@ acl_enable_key_list_policy = true rt := result.RuntimeConfig require.Equal(t, true, rt.ACLsEnabled) require.Equal(t, "dcone", rt.PrimaryDatacenter) - require.Equal(t, "token1", rt.ACLTokens.ACLAgentMasterToken) - require.Equal(t, "token2", rt.ACLTokens.ACLAgentToken) - require.Equal(t, "token3", rt.ACLTokens.ACLDefaultToken) - require.Equal(t, "token4", rt.ACLMasterToken) - require.Equal(t, "token5", rt.ACLTokens.ACLReplicationToken) + require.Equal(t, "token1", rt.ACLTokens.ACLAgentToken) + require.Equal(t, "token2", rt.ACLTokens.ACLDefaultToken) + require.Equal(t, "token3", rt.ACLTokens.ACLReplicationToken) require.Equal(t, "deny", rt.ACLResolverSettings.ACLDefaultPolicy) require.Equal(t, "async-cache", rt.ACLResolverSettings.ACLDownPolicy) require.Equal(t, 3*time.Hour, rt.ACLResolverSettings.ACLTokenTTL) @@ -91,3 +85,91 @@ enable_acl_replication = true rt := result.RuntimeConfig require.Equal(t, true, rt.ACLTokenReplication) } + +func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { + t.Run("top-level fields", func(t *testing.T) { + require := require.New(t) + + opts := LoadOpts{ + HCL: 
[]string{` + data_dir = "/foo" + + acl_master_token = "token1" + acl_agent_master_token = "token2" + `}, + } + patchLoadOptsShims(&opts) + + result, err := Load(opts) + require.NoError(err) + + expectWarns := []string{ + deprecationWarning("acl_master_token", "acl.tokens.initial_management"), + deprecationWarning("acl_agent_master_token", "acl.tokens.agent_recovery"), + } + require.ElementsMatch(expectWarns, result.Warnings) + + rt := result.RuntimeConfig + require.Equal("token1", rt.ACLInitialManagementToken) + require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken) + }) + + t.Run("embedded in tokens struct", func(t *testing.T) { + require := require.New(t) + + opts := LoadOpts{ + HCL: []string{` + data_dir = "/foo" + + acl { + tokens { + master = "token1" + agent_master = "token2" + } + } + `}, + } + patchLoadOptsShims(&opts) + + result, err := Load(opts) + require.NoError(err) + + expectWarns := []string{ + deprecationWarning("acl.tokens.master", "acl.tokens.initial_management"), + deprecationWarning("acl.tokens.agent_master", "acl.tokens.agent_recovery"), + } + require.ElementsMatch(expectWarns, result.Warnings) + + rt := result.RuntimeConfig + require.Equal("token1", rt.ACLInitialManagementToken) + require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken) + }) + + t.Run("both", func(t *testing.T) { + require := require.New(t) + + opts := LoadOpts{ + HCL: []string{` + data_dir = "/foo" + + acl_master_token = "token1" + acl_agent_master_token = "token2" + + acl { + tokens { + master = "token3" + agent_master = "token4" + } + } + `}, + } + patchLoadOptsShims(&opts) + + result, err := Load(opts) + require.NoError(err) + + rt := result.RuntimeConfig + require.Equal("token3", rt.ACLInitialManagementToken) + require.Equal("token4", rt.ACLTokens.ACLAgentRecoveryToken) + }) +} diff --git a/agent/config/limits.go b/agent/config/limits.go index bd840e58c..f1ff5e7f5 100644 --- a/agent/config/limits.go +++ b/agent/config/limits.go @@ -1,3 +1,4 @@ +//go:build !windows 
// +build !windows package config diff --git a/agent/config/limits_windows.go b/agent/config/limits_windows.go index 6cd8817b2..1e6e1822f 100644 --- a/agent/config/limits_windows.go +++ b/agent/config/limits_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package config diff --git a/agent/config/runtime.go b/agent/config/runtime.go index aae4f67b5..1d13e19d8 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -73,12 +73,12 @@ type RuntimeConfig struct { // hcl: acl.enable_key_list_policy = (true|false) ACLEnableKeyListPolicy bool - // ACLMasterToken is used to bootstrap the ACL system. It should be specified + // ACLInitialManagementToken is used to bootstrap the ACL system. It should be specified // on the servers in the PrimaryDatacenter. When the leader comes online, it ensures - // that the Master token is available. This provides the initial token. + // that the initial management token is available. This provides the initial token. // - // hcl: acl.tokens.master = string - ACLMasterToken string + // hcl: acl.tokens.initial_management = string + ACLInitialManagementToken string // ACLtokenReplication is used to indicate that both tokens and policies // should be replicated instead of just policies @@ -943,6 +943,8 @@ type RuntimeConfig struct { // hcl: raft_trailing_logs = int RaftTrailingLogs int + RaftBoltDBConfig consul.RaftBoltDBConfig + // ReconnectTimeoutLAN specifies the amount of time to wait to reconnect with // another agent before deciding it's permanently gone. This can be used to // control the time it takes to reap failed nodes from the cluster. 
diff --git a/agent/config/runtime_oss.go b/agent/config/runtime_oss.go index fcc9135dc..0cec03ccd 100644 --- a/agent/config/runtime_oss.go +++ b/agent/config/runtime_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/runtime_oss_test.go b/agent/config/runtime_oss_test.go index 4e9a87ffe..2179ac2df 100644 --- a/agent/config/runtime_oss_test.go +++ b/agent/config/runtime_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 3bd4257bd..63b216268 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/consul" @@ -4085,6 +4086,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { Service: "carrot", ServiceSubset: "kale", Namespace: "leek", + Partition: acl.DefaultPartitionName, PrefixRewrite: "/alternate", RequestTimeout: 99 * time.Second, NumRetries: 12345, @@ -5339,12 +5341,12 @@ func TestLoad_FullConfig(t *testing.T) { // user configurable values ACLTokens: token.Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "418fdff1", - ACLAgentToken: "bed2377c", - ACLAgentMasterToken: "64fd0e08", - ACLReplicationToken: "5795983a", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "418fdff1", + ACLAgentToken: "bed2377c", + ACLAgentRecoveryToken: "1dba6aba", + ACLReplicationToken: "5795983a", }, ACLsEnabled: true, @@ -5361,7 +5363,7 @@ func TestLoad_FullConfig(t *testing.T) { ACLRoleTTL: 9876 * time.Second, }, ACLEnableKeyListPolicy: true, - ACLMasterToken: "8a19ac27", + ACLInitialManagementToken: "3820e09a", ACLTokenReplication: true, AdvertiseAddrLAN: ipAddr("17.99.29.16"), AdvertiseAddrWAN: 
ipAddr("78.63.37.19"), @@ -6015,15 +6017,18 @@ func TestLoad_FullConfig(t *testing.T) { "args": []interface{}{"dltjDJ2a", "flEa7C2d"}, }, }, + RaftBoltDBConfig: consul.RaftBoltDBConfig{NoFreelistSync: true}, } entFullRuntimeConfig(expected) expectedWarns := []string{ deprecationWarning("acl_datacenter", "primary_datacenter"), - deprecationWarning("acl_agent_master_token", "acl.tokens.agent_master"), + deprecationWarning("acl_agent_master_token", "acl.tokens.agent_recovery"), + deprecationWarning("acl.tokens.agent_master", "acl.tokens.agent_recovery"), deprecationWarning("acl_agent_token", "acl.tokens.agent"), deprecationWarning("acl_token", "acl.tokens.default"), - deprecationWarning("acl_master_token", "acl.tokens.master"), + deprecationWarning("acl_master_token", "acl.tokens.initial_management"), + deprecationWarning("acl.tokens.master", "acl.tokens.initial_management"), deprecationWarning("acl_replication_token", "acl.tokens.replication"), deprecationWarning("enable_acl_replication", "acl.enable_token_replication"), deprecationWarning("acl_default_policy", "acl.default_policy"), diff --git a/agent/config/segment_oss.go b/agent/config/segment_oss.go index ed9c6eb67..d7a80c71e 100644 --- a/agent/config/segment_oss.go +++ b/agent/config/segment_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/segment_oss_test.go b/agent/config/segment_oss_test.go index de224be85..52b4a0964 100644 --- a/agent/config/segment_oss_test.go +++ b/agent/config/segment_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package config diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 2d1093d1e..951511bcf 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -1,6 +1,6 @@ { "ACLEnableKeyListPolicy": false, - "ACLMasterToken": "hidden", + 
"ACLInitialManagementToken": "hidden", "ACLResolverSettings": { "ACLDefaultPolicy": "", "ACLDownPolicy": "", @@ -14,7 +14,7 @@ }, "ACLTokenReplication": false, "ACLTokens": { - "ACLAgentMasterToken": "hidden", + "ACLAgentRecoveryToken": "hidden", "ACLAgentToken": "hidden", "ACLDefaultToken": "hidden", "ACLReplicationToken": "hidden", @@ -252,6 +252,9 @@ "RPCMaxConnsPerClient": 0, "RPCProtocol": 0, "RPCRateLimit": 0, + "RaftBoltDBConfig": { + "NoFreelistSync": false + }, "RaftProtocol": 3, "RaftSnapshotInterval": "0s", "RaftSnapshotThreshold": 0, @@ -421,4 +424,4 @@ "Version": "", "VersionPrerelease": "", "Watches": [] -} +} \ No newline at end of file diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index f21e26f0f..869f67252 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -21,7 +21,9 @@ acl = { msp_disable_bootstrap = true tokens = { master = "8a19ac27", + initial_management = "3820e09a", agent_master = "64fd0e08", + agent_recovery = "1dba6aba", replication = "5795983a", agent = "bed2377c", default = "418fdff1", @@ -326,6 +328,9 @@ raft_protocol = 3 raft_snapshot_threshold = 16384 raft_snapshot_interval = "30s" raft_trailing_logs = 83749 +raft_boltdb { + NoFreelistSync = true +} read_replica = true reconnect_timeout = "23739s" reconnect_timeout_wan = "26694s" diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 200731915..017651d88 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -22,7 +22,9 @@ "msp_disable_bootstrap": true, "tokens" : { "master" : "8a19ac27", + "initial_management" : "3820e09a", "agent_master" : "64fd0e08", + "agent_recovery" : "1dba6aba", "replication" : "5795983a", "agent" : "bed2377c", "default" : "418fdff1", @@ -324,6 +326,9 @@ "raft_snapshot_threshold": 16384, "raft_snapshot_interval": "30s", "raft_trailing_logs": 83749, + "raft_boltdb": { + 
"NoFreelistSync": true + }, "read_replica": true, "reconnect_timeout": "23739s", "reconnect_timeout_wan": "26694s", diff --git a/agent/connect/ca/provider_aws.go b/agent/connect/ca/provider_aws.go index 531f39f03..b813ef507 100644 --- a/agent/connect/ca/provider_aws.go +++ b/agent/connect/ca/provider_aws.go @@ -603,7 +603,7 @@ func (a *AWSProvider) Sign(csr *x509.CertificateRequest) (string, error) { // SignIntermediate implements Provider func (a *AWSProvider) SignIntermediate(csr *x509.CertificateRequest) (string, error) { - err := validateSignIntermediate(csr, &connect.SpiffeIDSigning{ClusterID: a.clusterID, Domain: "consul"}) + err := validateSignIntermediate(csr, connect.SpiffeIDSigningForCluster(a.clusterID)) if err != nil { return "", err } diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index 21f860f43..dea91e5d7 100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" ) @@ -56,7 +55,7 @@ func NewConsulProvider(delegate ConsulProviderStateDelegate, logger hclog.Logger } type ConsulProviderStateDelegate interface { - State() *state.Store + ProviderState(id string) (*structs.CAConsulProviderState, error) ApplyCARequest(*structs.CARequest) (interface{}, error) } @@ -76,13 +75,13 @@ func (c *ConsulProvider) Configure(cfg ProviderConfig) error { c.id = hexStringHash(fmt.Sprintf("%s,%s,%s,%d,%v", config.PrivateKey, config.RootCert, config.PrivateKeyType, config.PrivateKeyBits, cfg.IsPrimary)) c.clusterID = cfg.ClusterID c.isPrimary = cfg.IsPrimary - c.spiffeID = connect.SpiffeIDSigningForCluster(&structs.CAConfiguration{ClusterID: c.clusterID}) + c.spiffeID = connect.SpiffeIDSigningForCluster(c.clusterID) // Passthrough test state for state handling tests. See testState doc. 
c.parseTestState(cfg.RawConfig, cfg.State) // Exit early if the state store has an entry for this provider's config. - _, providerState, err := c.Delegate.State().CAProviderState(c.id) + providerState, err := c.Delegate.ProviderState(c.id) if err != nil { return err } @@ -98,7 +97,7 @@ func (c *ConsulProvider) Configure(cfg ProviderConfig) error { // Check if there are any entries with old ID schemes. for _, oldID := range oldIDs { - _, providerState, err = c.Delegate.State().CAProviderState(oldID) + providerState, err = c.Delegate.ProviderState(oldID) if err != nil { return err } @@ -589,8 +588,7 @@ func (c *ConsulProvider) SupportsCrossSigning() (bool, error) { // getState returns the current provider state from the state delegate, and returns // ErrNotInitialized if no entry is found. func (c *ConsulProvider) getState() (*structs.CAConsulProviderState, error) { - stateStore := c.Delegate.State() - _, providerState, err := stateStore.CAProviderState(c.id) + providerState, err := c.Delegate.ProviderState(c.id) if err != nil { return nil, err } @@ -617,19 +615,13 @@ func (c *ConsulProvider) incrementAndGetNextSerialNumber() (uint64, error) { // generateCA makes a new root CA using the current private key func (c *ConsulProvider) generateCA(privateKey string, sn uint64, rootCertTTL time.Duration) (string, error) { - stateStore := c.Delegate.State() - _, config, err := stateStore.CAConfig(nil) - if err != nil { - return "", err - } - privKey, err := connect.ParseSigner(privateKey) if err != nil { return "", fmt.Errorf("error parsing private key %q: %s", privateKey, err) } // The URI (SPIFFE compatible) for the cert - id := connect.SpiffeIDSigningForCluster(config) + id := connect.SpiffeIDSigningForCluster(c.clusterID) keyId, err := connect.KeyId(privKey.Public()) if err != nil { return "", err diff --git a/agent/connect/ca/provider_consul_test.go b/agent/connect/ca/provider_consul_test.go index f4e7c7923..6b4f31837 100644 --- 
a/agent/connect/ca/provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -17,8 +17,9 @@ type consulCAMockDelegate struct { state *state.Store } -func (c *consulCAMockDelegate) State() *state.Store { - return c.state +func (c *consulCAMockDelegate) ProviderState(id string) (*structs.CAConsulProviderState, error) { + _, s, err := c.state.CAProviderState(id) + return s, err } func (c *consulCAMockDelegate) ApplyCARequest(req *structs.CARequest) (interface{}, error) { diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 0f174f1a5..ef1920d7a 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -8,9 +8,11 @@ import ( "fmt" "io/ioutil" "net/http" + "os" "strings" "time" + "github.com/hashicorp/consul/lib/decode" "github.com/hashicorp/go-hclog" vaultapi "github.com/hashicorp/vault/api" "github.com/mitchellh/mapstructure" @@ -19,7 +21,29 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -const VaultCALeafCertRole = "leaf-cert" +const ( + VaultCALeafCertRole = "leaf-cert" + + VaultAuthMethodTypeAliCloud = "alicloud" + VaultAuthMethodTypeAppRole = "approle" + VaultAuthMethodTypeAWS = "aws" + VaultAuthMethodTypeAzure = "azure" + VaultAuthMethodTypeCloudFoundry = "cf" + VaultAuthMethodTypeGitHub = "github" + VaultAuthMethodTypeGCP = "gcp" + VaultAuthMethodTypeJWT = "jwt" + VaultAuthMethodTypeKerberos = "kerberos" + VaultAuthMethodTypeKubernetes = "kubernetes" + VaultAuthMethodTypeLDAP = "ldap" + VaultAuthMethodTypeOCI = "oci" + VaultAuthMethodTypeOkta = "okta" + VaultAuthMethodTypeRadius = "radius" + VaultAuthMethodTypeTLS = "cert" + VaultAuthMethodTypeToken = "token" + VaultAuthMethodTypeUserpass = "userpass" + + defaultK8SServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" +) var ErrBackendNotMounted = fmt.Errorf("backend not mounted") var ErrBackendNotInitialized = fmt.Errorf("backend not initialized") @@ -74,6 +98,13 @@ func (v *VaultProvider) 
Configure(cfg ProviderConfig) error { return err } + if config.AuthMethod != nil { + loginResp, err := vaultLogin(client, config.AuthMethod) + if err != nil { + return err + } + config.Token = loginResp.Auth.ClientToken + } client.SetToken(config.Token) // We don't want to set the namespace if it's empty to prevent potential @@ -87,14 +118,14 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { v.client = client v.isPrimary = cfg.IsPrimary v.clusterID = cfg.ClusterID - v.spiffeID = connect.SpiffeIDSigningForCluster(&structs.CAConfiguration{ClusterID: v.clusterID}) + v.spiffeID = connect.SpiffeIDSigningForCluster(v.clusterID) // Look up the token to see if we can auto-renew its lease. secret, err := client.Auth().Token().LookupSelf() if err != nil { return err } else if secret == nil { - return fmt.Errorf("Could not look up Vault provider token: not found") + return fmt.Errorf("could not look up Vault provider token: not found") } var token struct { Renewable bool @@ -105,7 +136,7 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { } // Set up a renewer to renew the token automatically, if supported. 
- if token.Renewable { + if token.Renewable || config.AuthMethod != nil { lifetimeWatcher, err := client.NewLifetimeWatcher(&vaultapi.LifetimeWatcherInput{ Secret: &vaultapi.Secret{ Auth: &vaultapi.SecretAuth{ @@ -118,10 +149,10 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { RenewBehavior: vaultapi.RenewBehaviorIgnoreErrors, }) if err != nil { - return fmt.Errorf("Error beginning Vault provider token renewal: %v", err) + return fmt.Errorf("error beginning Vault provider token renewal: %v", err) } - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) v.shutdown = cancel go v.renewToken(ctx, lifetimeWatcher) } @@ -129,7 +160,9 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { return nil } -// renewToken uses a vaultapi.Renewer to repeatedly renew our token's lease. +// renewToken uses a vaultapi.LifetimeWatcher to repeatedly renew our token's lease. +// If the token can no longer be renewed and auth method is set, +// it will re-authenticate to Vault using the auth method and restart the renewer with the new token. func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.LifetimeWatcher) { go watcher.Start() defer watcher.Stop() @@ -144,7 +177,35 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti v.logger.Error("Error renewing token for Vault provider", "error", err) } - // Watcher routine has finished, so start it again. + // If the watcher has exited and auth method is enabled, + // re-authenticate using the auth method and set up a new watcher. + if v.config.AuthMethod != nil { + // Login to Vault using the auth method. + loginResp, err := vaultLogin(v.client, v.config.AuthMethod) + if err != nil { + v.logger.Error("Error login in to Vault with %q auth method", v.config.AuthMethod.Type) + // Restart the watcher. + go watcher.Start() + continue + } + + // Set the new token for the vault client. 
+ v.client.SetToken(loginResp.Auth.ClientToken) + v.logger.Info("Successfully re-authenticated with Vault using auth method") + + // Start the new watcher for the new token. + watcher, err = v.client.NewLifetimeWatcher(&vaultapi.LifetimeWatcherInput{ + Secret: loginResp, + RenewBehavior: vaultapi.RenewBehaviorIgnoreErrors, + }) + if err != nil { + v.logger.Error("Error starting token renewal process") + go watcher.Start() + continue + } + } + + // Restart the watcher. go watcher.Start() case <-watcher.RenewCh(): @@ -185,7 +246,6 @@ func (v *VaultProvider) GenerateRoot() error { DefaultLeaseTTL: v.config.RootCertTTL.String(), }, }) - if err != nil { return err } @@ -599,7 +659,10 @@ func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderCon } decodeConf := &mapstructure.DecoderConfig{ - DecodeHook: structs.ParseDurationFunc(), + DecodeHook: mapstructure.ComposeDecodeHookFunc( + structs.ParseDurationFunc(), + decode.HookTranslateKeys, + ), Result: &config, WeaklyTypedInput: true, } @@ -613,8 +676,12 @@ func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderCon return nil, fmt.Errorf("error decoding config: %s", err) } - if config.Token == "" { - return nil, fmt.Errorf("must provide a Vault token") + if config.Token == "" && config.AuthMethod == nil { + return nil, fmt.Errorf("must provide a Vault token or configure a Vault auth method") + } + + if config.Token != "" && config.AuthMethod != nil { + return nil, fmt.Errorf("only one of Vault token or Vault auth method can be provided, but not both") } if config.RootPKIPath == "" { @@ -637,3 +704,76 @@ func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderCon return &config, nil } + +func vaultLogin(client *vaultapi.Client, authMethod *structs.VaultAuthMethod) (*vaultapi.Secret, error) { + // Adapted from https://www.vaultproject.io/docs/auth/kubernetes#code-example + loginPath, err := configureVaultAuthMethod(authMethod) + if err != nil { + return nil, 
err + } + + resp, err := client.Logical().Write(loginPath, authMethod.Params) + if err != nil { + return nil, err + } + if resp == nil || resp.Auth == nil || resp.Auth.ClientToken == "" { + return nil, fmt.Errorf("login response did not return client token") + } + + return resp, nil +} + +func configureVaultAuthMethod(authMethod *structs.VaultAuthMethod) (loginPath string, err error) { + if authMethod.MountPath == "" { + authMethod.MountPath = authMethod.Type + } + + switch authMethod.Type { + case VaultAuthMethodTypeKubernetes: + // For the Kubernetes Auth method, we will try to read the JWT token + // from the default service account file location if jwt was not provided. + if jwt, ok := authMethod.Params["jwt"]; !ok || jwt == "" { + serviceAccountToken, err := os.ReadFile(defaultK8SServiceAccountTokenPath) + if err != nil { + return "", err + } + + authMethod.Params["jwt"] = string(serviceAccountToken) + } + loginPath = fmt.Sprintf("auth/%s/login", authMethod.MountPath) + // These auth methods require a username for the login API path. + case VaultAuthMethodTypeLDAP, VaultAuthMethodTypeUserpass, VaultAuthMethodTypeOkta, VaultAuthMethodTypeRadius: + // Get username from the params. + if username, ok := authMethod.Params["username"]; ok { + loginPath = fmt.Sprintf("auth/%s/login/%s", authMethod.MountPath, username) + } else { + return "", fmt.Errorf("failed to get 'username' from auth method params") + } + // This auth method requires a role for the login API path. 
+ case VaultAuthMethodTypeOCI: + if role, ok := authMethod.Params["role"]; ok { + loginPath = fmt.Sprintf("auth/%s/login/%s", authMethod.MountPath, role) + } else { + return "", fmt.Errorf("failed to get 'role' from auth method params") + } + case VaultAuthMethodTypeToken: + return "", fmt.Errorf("'token' auth method is not supported via auth method configuration; " + + "please provide the token with the 'token' parameter in the CA configuration") + // The rest of the auth methods use auth/ login API path. + case VaultAuthMethodTypeAliCloud, + VaultAuthMethodTypeAppRole, + VaultAuthMethodTypeAWS, + VaultAuthMethodTypeAzure, + VaultAuthMethodTypeCloudFoundry, + VaultAuthMethodTypeGitHub, + VaultAuthMethodTypeGCP, + VaultAuthMethodTypeJWT, + VaultAuthMethodTypeKerberos, + VaultAuthMethodTypeTLS: + loginPath = fmt.Sprintf("auth/%s/login", authMethod.MountPath) + default: + return "", fmt.Errorf("auth method %q is not supported", authMethod.Type) + } + + return +} diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index ccb7fc01c..f09b7717e 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -18,6 +18,94 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" ) +func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { + cases := map[string]struct { + rawConfig map[string]interface{} + expConfig *structs.VaultCAProviderConfig + expError string + }{ + "no token and no auth method provided": { + rawConfig: map[string]interface{}{}, + expError: "must provide a Vault token or configure a Vault auth method", + }, + "both token and auth method provided": { + rawConfig: map[string]interface{}{"Token": "test", "AuthMethod": map[string]interface{}{"Type": "test"}}, + expError: "only one of Vault token or Vault auth method can be provided, but not both", + }, + "no root PKI path": { + rawConfig: map[string]interface{}{"Token": "test"}, + expError: "must provide a valid path to a 
root PKI backend", + }, + "no root intermediate path": { + rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test"}, + expError: "must provide a valid path for the intermediate PKI backend", + }, + "adds a slash to RootPKIPath and IntermediatePKIPath": { + rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test", "IntermediatePKIPath": "test"}, + expConfig: &structs.VaultCAProviderConfig{ + CommonCAProviderConfig: defaultCommonConfig(), + Token: "test", + RootPKIPath: "test/", + IntermediatePKIPath: "test/", + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + config, err := ParseVaultCAConfig(c.rawConfig) + if c.expError != "" { + require.EqualError(t, err, c.expError) + } else { + require.NoError(t, err) + require.Equal(t, c.expConfig, config) + } + }) + } +} + +func TestVaultCAProvider_configureVaultAuthMethod(t *testing.T) { + cases := map[string]struct { + expLoginPath string + params map[string]interface{} + expError string + }{ + "alicloud": {expLoginPath: "auth/alicloud/login"}, + "approle": {expLoginPath: "auth/approle/login"}, + "aws": {expLoginPath: "auth/aws/login"}, + "azure": {expLoginPath: "auth/azure/login"}, + "cf": {expLoginPath: "auth/cf/login"}, + "github": {expLoginPath: "auth/github/login"}, + "gcp": {expLoginPath: "auth/gcp/login"}, + "jwt": {expLoginPath: "auth/jwt/login"}, + "kerberos": {expLoginPath: "auth/kerberos/login"}, + "kubernetes": {expLoginPath: "auth/kubernetes/login", params: map[string]interface{}{"jwt": "fake"}}, + "ldap": {expLoginPath: "auth/ldap/login/foo", params: map[string]interface{}{"username": "foo"}}, + "oci": {expLoginPath: "auth/oci/login/foo", params: map[string]interface{}{"role": "foo"}}, + "okta": {expLoginPath: "auth/okta/login/foo", params: map[string]interface{}{"username": "foo"}}, + "radius": {expLoginPath: "auth/radius/login/foo", params: map[string]interface{}{"username": "foo"}}, + "cert": {expLoginPath: "auth/cert/login"}, + "token": 
{expError: "'token' auth method is not supported via auth method configuration; please provide the token with the 'token' parameter in the CA configuration"}, + "userpass": {expLoginPath: "auth/userpass/login/foo", params: map[string]interface{}{"username": "foo"}}, + "unsupported": {expError: "auth method \"unsupported\" is not supported"}, + } + + for authMethodType, c := range cases { + t.Run(authMethodType, func(t *testing.T) { + loginPath, err := configureVaultAuthMethod(&structs.VaultAuthMethod{ + Type: authMethodType, + Params: c.params, + }) + if c.expError == "" { + require.NoError(t, err) + require.Equal(t, c.expLoginPath, loginPath) + } else { + require.EqualError(t, err, c.expError) + } + }) + } +} + func TestVaultCAProvider_VaultTLSConfig(t *testing.T) { config := &structs.VaultCAProviderConfig{ CAFile: "/capath/ca.pem", @@ -507,6 +595,138 @@ func TestVaultProvider_Cleanup(t *testing.T) { }) } +func TestVaultProvider_ConfigureWithAuthMethod(t *testing.T) { + + SkipIfVaultNotPresent(t) + + cases := []struct { + authMethodType string + configureAuthMethodFunc func(t *testing.T, vaultClient *vaultapi.Client) map[string]interface{} + }{ + { + authMethodType: "userpass", + configureAuthMethodFunc: func(t *testing.T, vaultClient *vaultapi.Client) map[string]interface{} { + _, err := vaultClient.Logical().Write("/auth/userpass/users/test", + map[string]interface{}{"password": "foo", "policies": "admins"}) + require.NoError(t, err) + return map[string]interface{}{ + "Type": "userpass", + "Params": map[string]interface{}{ + "username": "test", + "password": "foo", + }, + } + }, + }, + { + authMethodType: "approle", + configureAuthMethodFunc: func(t *testing.T, vaultClient *vaultapi.Client) map[string]interface{} { + _, err := vaultClient.Logical().Write("auth/approle/role/my-role", nil) + require.NoError(t, err) + resp, err := vaultClient.Logical().Read("auth/approle/role/my-role/role-id") + require.NoError(t, err) + roleID := resp.Data["role_id"] + + resp, err 
= vaultClient.Logical().Write("auth/approle/role/my-role/secret-id", nil) + require.NoError(t, err) + secretID := resp.Data["secret_id"] + + return map[string]interface{}{ + "Type": "approle", + "Params": map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + } + }, + }, + } + + for _, c := range cases { + t.Run(c.authMethodType, func(t *testing.T) { + testVault := NewTestVaultServer(t) + + err := testVault.Client().Sys().EnableAuthWithOptions(c.authMethodType, &vaultapi.EnableAuthOptions{Type: c.authMethodType}) + require.NoError(t, err) + + authMethodConf := c.configureAuthMethodFunc(t, testVault.Client()) + + conf := map[string]interface{}{ + "Address": testVault.Addr, + "RootPKIPath": "pki-root/", + "IntermediatePKIPath": "pki-intermediate/", + "AuthMethod": authMethodConf, + } + + provider := NewVaultProvider(hclog.New(nil)) + + cfg := ProviderConfig{ + ClusterID: connect.TestClusterID, + Datacenter: "dc1", + IsPrimary: true, + RawConfig: conf, + } + t.Cleanup(provider.Stop) + err = provider.Configure(cfg) + require.NoError(t, err) + require.NotEmpty(t, provider.client.Token()) + }) + } +} + +func TestVaultProvider_RotateAuthMethodToken(t *testing.T) { + + SkipIfVaultNotPresent(t) + + testVault := NewTestVaultServer(t) + + err := testVault.Client().Sys().EnableAuthWithOptions("approle", &vaultapi.EnableAuthOptions{Type: "approle"}) + require.NoError(t, err) + + _, err = testVault.Client().Logical().Write("auth/approle/role/my-role", + map[string]interface{}{"token_ttl": "2s", "token_explicit_max_ttl": "2s"}) + require.NoError(t, err) + resp, err := testVault.Client().Logical().Read("auth/approle/role/my-role/role-id") + require.NoError(t, err) + roleID := resp.Data["role_id"] + + resp, err = testVault.Client().Logical().Write("auth/approle/role/my-role/secret-id", nil) + require.NoError(t, err) + secretID := resp.Data["secret_id"] + + conf := map[string]interface{}{ + "Address": testVault.Addr, + "RootPKIPath": "pki-root/", + 
"IntermediatePKIPath": "pki-intermediate/", + "AuthMethod": map[string]interface{}{ + "Type": "approle", + "Params": map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + }, + } + + provider := NewVaultProvider(hclog.New(nil)) + + cfg := ProviderConfig{ + ClusterID: connect.TestClusterID, + Datacenter: "dc1", + IsPrimary: true, + RawConfig: conf, + } + t.Cleanup(provider.Stop) + err = provider.Configure(cfg) + require.NoError(t, err) + token := provider.client.Token() + require.NotEmpty(t, token) + + // Check that the token is rotated after max_ttl time has passed. + require.Eventually(t, func() bool { + return provider.client.Token() != token + }, 10*time.Second, 100*time.Millisecond) +} + func getIntermediateCertTTL(t *testing.T, caConf *structs.CAConfiguration) time.Duration { t.Helper() @@ -526,10 +746,6 @@ func getIntermediateCertTTL(t *testing.T, caConf *structs.CAConfiguration) time. return dur } -func testVaultProvider(t *testing.T) (*VaultProvider, *TestVaultServer) { - return testVaultProviderWithConfig(t, true, nil) -} - func testVaultProviderWithConfig(t *testing.T, isPrimary bool, rawConf map[string]interface{}) (*VaultProvider, *TestVaultServer) { testVault, err := runTestVault(t) if err != nil { @@ -573,6 +789,7 @@ func createVaultProvider(t *testing.T, isPrimary bool, addr, token string, rawCo cfg.Datacenter = "dc2" } + t.Cleanup(provider.Stop) require.NoError(t, provider.Configure(cfg)) if isPrimary { require.NoError(t, provider.GenerateRoot()) diff --git a/agent/connect/ca/testing.go b/agent/connect/ca/testing.go index 00f49c579..6ba9df791 100644 --- a/agent/connect/ca/testing.go +++ b/agent/connect/ca/testing.go @@ -120,11 +120,7 @@ func runTestVault(t testing.T) (*TestVaultServer, error) { return nil, fmt.Errorf("%q not found on $PATH", vaultBinaryName) } - ports := freeport.MustTake(2) - returnPortsFn := func() { - freeport.Return(ports) - } - + ports := freeport.GetN(t, 2) var ( clientAddr = fmt.Sprintf("127.0.0.1:%d", 
ports[0]) clusterAddr = fmt.Sprintf("127.0.0.1:%d", ports[1]) @@ -136,7 +132,6 @@ func runTestVault(t testing.T) (*TestVaultServer, error) { Address: "http://" + clientAddr, }) if err != nil { - returnPortsFn() return nil, err } client.SetToken(token) @@ -156,20 +151,21 @@ func runTestVault(t testing.T) (*TestVaultServer, error) { cmd.Stdout = ioutil.Discard cmd.Stderr = ioutil.Discard if err := cmd.Start(); err != nil { - returnPortsFn() return nil, err } testVault := &TestVaultServer{ - RootToken: token, - Addr: "http://" + clientAddr, - cmd: cmd, - client: client, - returnPortsFn: returnPortsFn, + RootToken: token, + Addr: "http://" + clientAddr, + cmd: cmd, + client: client, } t.Cleanup(func() { - testVault.Stop() + if err := testVault.Stop(); err != nil { + t.Log("failed to stop vault server: %w", err) + } }) + return testVault, nil } @@ -178,9 +174,6 @@ type TestVaultServer struct { Addr string cmd *exec.Cmd client *vaultapi.Client - - // returnPortsFn will put the ports claimed for the test back into the - returnPortsFn func() } var printedVaultVersion sync.Once @@ -226,11 +219,6 @@ func (v *TestVaultServer) Stop() error { if err := v.cmd.Wait(); err != nil { return err } - - if v.returnPortsFn != nil { - v.returnPortsFn() - } - return nil } diff --git a/agent/connect/uri.go b/agent/connect/uri.go index 7bdf223e7..fa1387086 100644 --- a/agent/connect/uri.go +++ b/agent/connect/uri.go @@ -76,6 +76,10 @@ func ParseCertURI(input *url.URL) (CertURI, error) { } } + if ap == "" { + ap = "default" + } + return &SpiffeIDService{ Host: input.Host, Partition: ap, @@ -103,6 +107,10 @@ func ParseCertURI(input *url.URL) (CertURI, error) { } } + if ap == "" { + ap = "default" + } + return &SpiffeIDAgent{ Host: input.Host, Partition: ap, diff --git a/agent/connect/uri_agent_oss.go b/agent/connect/uri_agent_oss.go index 0936d680a..1ae6f18c3 100644 --- a/agent/connect/uri_agent_oss.go +++ b/agent/connect/uri_agent_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build 
!consulent package connect diff --git a/agent/connect/uri_agent_oss_test.go b/agent/connect/uri_agent_oss_test.go index 8bfc38784..37ebc0bf3 100644 --- a/agent/connect/uri_agent_oss_test.go +++ b/agent/connect/uri_agent_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package connect diff --git a/agent/connect/uri_service_oss.go b/agent/connect/uri_service_oss.go index 3838ef955..8270f96c2 100644 --- a/agent/connect/uri_service_oss.go +++ b/agent/connect/uri_service_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package connect diff --git a/agent/connect/uri_service_oss_test.go b/agent/connect/uri_service_oss_test.go index a844469e5..069df2616 100644 --- a/agent/connect/uri_service_oss_test.go +++ b/agent/connect/uri_service_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package connect diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go index 671bb79fc..84cd29cde 100644 --- a/agent/connect/uri_signing.go +++ b/agent/connect/uri_signing.go @@ -4,8 +4,6 @@ import ( "fmt" "net/url" "strings" - - "github.com/hashicorp/consul/agent/structs" ) // SpiffeIDSigning is the structure to represent the SPIFFE ID for a @@ -64,6 +62,6 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool { // break all certificate validation. That does mean that DNS prefix might not // match the identity URIs and so the trust domain might not actually resolve // which we would like but don't actually need. 
-func SpiffeIDSigningForCluster(config *structs.CAConfiguration) *SpiffeIDSigning { - return &SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} +func SpiffeIDSigningForCluster(clusterID string) *SpiffeIDSigning { + return &SpiffeIDSigning{ClusterID: clusterID, Domain: "consul"} } diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go index ca4020b99..36becc37b 100644 --- a/agent/connect/uri_signing_test.go +++ b/agent/connect/uri_signing_test.go @@ -5,17 +5,12 @@ import ( "strings" "testing" - "github.com/hashicorp/consul/agent/structs" - "github.com/stretchr/testify/assert" ) func TestSpiffeIDSigningForCluster(t *testing.T) { // For now it should just append .consul to the ID. - config := &structs.CAConfiguration{ - ClusterID: TestClusterID, - } - id := SpiffeIDSigningForCluster(config) + id := SpiffeIDSigningForCluster(TestClusterID) assert.Equal(t, id.URI().String(), "spiffe://"+TestClusterID+".consul") } diff --git a/agent/connect/uri_test.go b/agent/connect/uri_test.go index 47e1e4199..96b2b7a71 100644 --- a/agent/connect/uri_test.go +++ b/agent/connect/uri_test.go @@ -5,10 +5,13 @@ import ( "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil" ) func TestParseCertURIFromString(t *testing.T) { + defaultEntMeta := structs.DefaultEnterpriseMetaInDefaultPartition() + var cases = []struct { Name string URI string @@ -26,6 +29,7 @@ func TestParseCertURIFromString(t *testing.T) { "spiffe://1234.consul/ns/default/dc/dc01/svc/web", &SpiffeIDService{ Host: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), Namespace: "default", Datacenter: "dc01", Service: "web", @@ -49,6 +53,7 @@ func TestParseCertURIFromString(t *testing.T) { "spiffe://1234.consul/agent/client/dc/dc1/id/uuid", &SpiffeIDAgent{ Host: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), Datacenter: "dc1", Agent: "uuid", }, @@ -70,6 +75,7 @@ func 
TestParseCertURIFromString(t *testing.T) { "spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux", &SpiffeIDService{ Host: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), Namespace: "foo/bar", Datacenter: "bar/baz", Service: "baz/qux", diff --git a/agent/consul/acl.go b/agent/consul/acl.go index c659e7b37..9a2c74a17 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1053,7 +1053,7 @@ func (r *ACLResolver) resolveLocallyManagedToken(token string) (structs.ACLIdent return nil, nil, false } - if r.tokens.IsAgentMasterToken(token) { + if r.tokens.IsAgentRecoveryToken(token) { return structs.NewAgentMasterTokenIdentity(r.config.NodeName, token), r.agentMasterAuthz, true } @@ -1219,10 +1219,12 @@ func (f *aclFilter) allowSession(node string, ent *acl.AuthorizerContext) bool { } // filterHealthChecks is used to filter a set of health checks down based on -// the configured ACL rules for a token. -func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) { +// the configured ACL rules for a token. Returns true if any elements were +// removed. +func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) bool { hc := *checks var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(hc); i++ { check := hc[i] @@ -1232,31 +1234,40 @@ func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) { } f.logger.Debug("dropping check from result due to ACLs", "check", check.CheckID) + removed = true hc = append(hc[:i], hc[i+1:]...) i-- } *checks = hc + return removed } -// filterServices is used to filter a set of services based on ACLs. -func (f *aclFilter) filterServices(services structs.Services, entMeta *structs.EnterpriseMeta) { +// filterServices is used to filter a set of services based on ACLs. Returns +// true if any elements were removed. 
+func (f *aclFilter) filterServices(services structs.Services, entMeta *structs.EnterpriseMeta) bool { var authzContext acl.AuthorizerContext entMeta.FillAuthzContext(&authzContext) + var removed bool + for svc := range services { if f.allowService(svc, &authzContext) { continue } f.logger.Debug("dropping service from result due to ACLs", "service", svc) + removed = true delete(services, svc) } + + return removed } // filterServiceNodes is used to filter a set of nodes for a given service -// based on the configured ACL rules. -func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) { +// based on the configured ACL rules. Returns true if any elements were removed. +func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) bool { sn := *nodes var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(sn); i++ { node := sn[i] @@ -1265,26 +1276,30 @@ func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) { if f.allowNode(node.Node, &authzContext) && f.allowService(node.ServiceName, &authzContext) { continue } + removed = true f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node.Node, &node.EnterpriseMeta)) sn = append(sn[:i], sn[i+1:]...) i-- } *nodes = sn + return removed } // filterNodeServices is used to filter services on a given node base on ACLs. 
-func (f *aclFilter) filterNodeServices(services **structs.NodeServices) { +// Returns true if any elements were removed +func (f *aclFilter) filterNodeServices(services **structs.NodeServices) bool { if *services == nil { - return + return false } var authzContext acl.AuthorizerContext (*services).Node.FillAuthzContext(&authzContext) if !f.allowNode((*services).Node.Node, &authzContext) { *services = nil - return + return true } + var removed bool for svcName, svc := range (*services).Services { svc.FillAuthzContext(&authzContext) @@ -1292,50 +1307,53 @@ func (f *aclFilter) filterNodeServices(services **structs.NodeServices) { continue } f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID()) + removed = true delete((*services).Services, svcName) } + + return removed } // filterNodeServices is used to filter services on a given node base on ACLs. -func (f *aclFilter) filterNodeServiceList(services **structs.NodeServiceList) { - if services == nil || *services == nil { - return +// Returns true if any elements were removed. 
+func (f *aclFilter) filterNodeServiceList(services *structs.NodeServiceList) bool { + if services.Node == nil { + return false } var authzContext acl.AuthorizerContext - (*services).Node.FillAuthzContext(&authzContext) - if !f.allowNode((*services).Node.Node, &authzContext) { - *services = nil - return + services.Node.FillAuthzContext(&authzContext) + if !f.allowNode(services.Node.Node, &authzContext) { + *services = structs.NodeServiceList{} + return true } - svcs := (*services).Services - modified := false + var removed bool + svcs := services.Services for i := 0; i < len(svcs); i++ { svc := svcs[i] svc.FillAuthzContext(&authzContext) - if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svc.Service, &authzContext) { + if f.allowService(svc.Service, &authzContext) { continue } + f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID()) svcs = append(svcs[:i], svcs[i+1:]...) i-- - modified = true + removed = true } + services.Services = svcs - if modified { - *services = &structs.NodeServiceList{ - Node: (*services).Node, - Services: svcs, - } - } + return removed } -// filterCheckServiceNodes is used to filter nodes based on ACL rules. -func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) { +// filterCheckServiceNodes is used to filter nodes based on ACL rules. Returns +// true if any elements were removed. +func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) bool { csn := *nodes var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(csn); i++ { node := csn[i] @@ -1344,41 +1362,47 @@ func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) { continue } f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node.Node.Node, node.Node.GetEnterpriseMeta())) + removed = true csn = append(csn[:i], csn[i+1:]...) 
i-- } *nodes = csn + return removed } // filterServiceTopology is used to filter upstreams/downstreams based on ACL rules. // this filter is unlike others in that it also returns whether the result was filtered by ACLs func (f *aclFilter) filterServiceTopology(topology *structs.ServiceTopology) bool { - numUp := len(topology.Upstreams) - numDown := len(topology.Downstreams) - - f.filterCheckServiceNodes(&topology.Upstreams) - f.filterCheckServiceNodes(&topology.Downstreams) - - return numUp != len(topology.Upstreams) || numDown != len(topology.Downstreams) + filteredUpstreams := f.filterCheckServiceNodes(&topology.Upstreams) + filteredDownstreams := f.filterCheckServiceNodes(&topology.Downstreams) + return filteredUpstreams || filteredDownstreams } // filterDatacenterCheckServiceNodes is used to filter nodes based on ACL rules. -func (f *aclFilter) filterDatacenterCheckServiceNodes(datacenterNodes *map[string]structs.CheckServiceNodes) { +// Returns true if any elements are removed. +func (f *aclFilter) filterDatacenterCheckServiceNodes(datacenterNodes *map[string]structs.CheckServiceNodes) bool { dn := *datacenterNodes out := make(map[string]structs.CheckServiceNodes) + var removed bool for dc := range dn { nodes := dn[dc] - f.filterCheckServiceNodes(&nodes) + if f.filterCheckServiceNodes(&nodes) { + removed = true + } if len(nodes) > 0 { out[dc] = nodes } } *datacenterNodes = out + return removed } -// filterSessions is used to filter a set of sessions based on ACLs. -func (f *aclFilter) filterSessions(sessions *structs.Sessions) { +// filterSessions is used to filter a set of sessions based on ACLs. Returns +// true if any elements were removed. 
+func (f *aclFilter) filterSessions(sessions *structs.Sessions) bool { s := *sessions + + var removed bool for i := 0; i < len(s); i++ { session := s[i] @@ -1388,18 +1412,21 @@ func (f *aclFilter) filterSessions(sessions *structs.Sessions) { if f.allowSession(session.Node, &entCtx) { continue } + removed = true f.logger.Debug("dropping session from result due to ACLs", "session", session.ID) s = append(s[:i], s[i+1:]...) i-- } *sessions = s + return removed } // filterCoordinates is used to filter nodes in a coordinate dump based on ACL -// rules. -func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) { +// rules. Returns true if any elements were removed. +func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) bool { c := *coords var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(c); i++ { c[i].FillAuthzContext(&authzContext) @@ -1408,19 +1435,24 @@ func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) { continue } f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, c[i].GetEnterpriseMeta())) + removed = true c = append(c[:i], c[i+1:]...) i-- } *coords = c + return removed } // filterIntentions is used to filter intentions based on ACL rules. // We prune entries the user doesn't have access to, and we redact any tokens -// if the user doesn't have a management token. -func (f *aclFilter) filterIntentions(ixns *structs.Intentions) { +// if the user doesn't have a management token. Returns true if any elements +// were removed. 
+func (f *aclFilter) filterIntentions(ixns *structs.Intentions) bool { ret := make(structs.Intentions, 0, len(*ixns)) + var removed bool for _, ixn := range *ixns { if !ixn.CanRead(f.authorizer) { + removed = true f.logger.Debug("dropping intention from result due to ACLs", "intention", ixn.ID) continue } @@ -1429,14 +1461,17 @@ func (f *aclFilter) filterIntentions(ixns *structs.Intentions) { } *ixns = ret + return removed } // filterNodeDump is used to filter through all parts of a node dump and -// remove elements the provided ACL token cannot access. -func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { +// remove elements the provided ACL token cannot access. Returns true if +// any elements were removed. +func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) bool { nd := *dump var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(nd); i++ { info := nd[i] @@ -1444,6 +1479,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { info.FillAuthzContext(&authzContext) if node := info.Node; !f.allowNode(node, &authzContext) { f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, info.GetEnterpriseMeta())) + removed = true nd = append(nd[:i], nd[i+1:]...) i-- continue @@ -1457,6 +1493,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { continue } f.logger.Debug("dropping service from result due to ACLs", "service", svc) + removed = true info.Services = append(info.Services[:j], info.Services[j+1:]...) j-- } @@ -1469,17 +1506,21 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { continue } f.logger.Debug("dropping check from result due to ACLs", "check", chk.CheckID) + removed = true info.Checks = append(info.Checks[:j], info.Checks[j+1:]...) j-- } } *dump = nd + return removed } -// filterServiceDump is used to filter nodes based on ACL rules. 
-func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) { +// filterServiceDump is used to filter nodes based on ACL rules. Returns true +// if any elements were removed. +func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) bool { svcs := *services var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(svcs); i++ { service := svcs[i] @@ -1497,18 +1538,22 @@ func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) { } f.logger.Debug("dropping service from result due to ACLs", "service", service.GatewayService.Service) + removed = true svcs = append(svcs[:i], svcs[i+1:]...) i-- } *services = svcs + return removed } // filterNodes is used to filter through all parts of a node list and remove -// elements the provided ACL token cannot access. -func (f *aclFilter) filterNodes(nodes *structs.Nodes) { +// elements the provided ACL token cannot access. Returns true if any elements +// were removed. +func (f *aclFilter) filterNodes(nodes *structs.Nodes) bool { n := *nodes var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(n); i++ { n[i].FillAuthzContext(&authzContext) @@ -1517,10 +1562,12 @@ func (f *aclFilter) filterNodes(nodes *structs.Nodes) { continue } f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, n[i].GetEnterpriseMeta())) + removed = true n = append(n[:i], n[i+1:]...) i-- } *nodes = n + return removed } // redactPreparedQueryTokens will redact any tokens unless the client has a @@ -1555,8 +1602,10 @@ func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) { // filterPreparedQueries is used to filter prepared queries based on ACL rules. // We prune entries the user doesn't have access to, and we redact any tokens -// if the user doesn't have a management token. -func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { +// if the user doesn't have a management token. 
Returns true if any (named) +// queries were removed - un-named queries are meant to be ephemeral and can +// only be enumerated by a management token +func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) bool { var authzContext acl.AuthorizerContext structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext) // Management tokens can see everything with no filtering. @@ -1564,17 +1613,22 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { // the 1.4 ACL rewrite. The global-management token will provide unrestricted query privileges // so asking for ACLWrite should be unnecessary. if f.authorizer.ACLWrite(&authzContext) == acl.Allow { - return + return false } // Otherwise, we need to see what the token has access to. + var namedQueriesRemoved bool ret := make(structs.PreparedQueries, 0, len(*queries)) for _, query := range *queries { // If no prefix ACL applies to this query then filter it, since // we know at this point the user doesn't have a management // token, otherwise see what the policy says. 
- prefix, ok := query.GetACLPrefix() - if !ok || f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow { + prefix, hasName := query.GetACLPrefix() + switch { + case hasName && f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow: + namedQueriesRemoved = true + fallthrough + case !hasName: f.logger.Debug("dropping prepared query from result due to ACLs", "query", query.ID) continue } @@ -1586,6 +1640,7 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { ret = append(ret, final) } *queries = ret + return namedQueriesRemoved } func (f *aclFilter) filterToken(token **structs.ACLToken) { @@ -1753,14 +1808,16 @@ func (f *aclFilter) filterAuthMethods(methods *structs.ACLAuthMethods) { *methods = ret } -func (f *aclFilter) filterServiceList(services *structs.ServiceList) { +func (f *aclFilter) filterServiceList(services *structs.ServiceList) bool { ret := make(structs.ServiceList, 0, len(*services)) + var removed bool for _, svc := range *services { var authzContext acl.AuthorizerContext svc.FillAuthzContext(&authzContext) if f.authorizer.ServiceRead(svc.Name, &authzContext) != acl.Allow { + removed = true sid := structs.NewServiceID(svc.Name, &svc.EnterpriseMeta) f.logger.Debug("dropping service from result due to ACLs", "service", sid.String()) continue @@ -1770,11 +1827,14 @@ func (f *aclFilter) filterServiceList(services *structs.ServiceList) { } *services = ret + return removed } // filterGatewayServices is used to filter gateway to service mappings based on ACL rules. -func (f *aclFilter) filterGatewayServices(mappings *structs.GatewayServices) { +// Returns true if any elements were removed. +func (f *aclFilter) filterGatewayServices(mappings *structs.GatewayServices) bool { ret := make(structs.GatewayServices, 0, len(*mappings)) + var removed bool for _, s := range *mappings { // This filter only checks ServiceRead on the linked service. 
// ServiceRead on the gateway is checked in the GatewayServices endpoint before filtering. @@ -1783,11 +1843,13 @@ func (f *aclFilter) filterGatewayServices(mappings *structs.GatewayServices) { if f.authorizer.ServiceRead(s.Service.Name, &authzContext) != acl.Allow { f.logger.Debug("dropping service from result due to ACLs", "service", s.Service.String()) + removed = true continue } ret = append(ret, s) } *mappings = ret + return removed } func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, subj interface{}) { @@ -1801,52 +1863,56 @@ func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, sub filt.filterCheckServiceNodes(v) case *structs.IndexedCheckServiceNodes: - filt.filterCheckServiceNodes(&v.Nodes) + v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes) + + case *structs.PreparedQueryExecuteResponse: + v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes) case *structs.IndexedServiceTopology: filtered := filt.filterServiceTopology(v.ServiceTopology) if filtered { v.FilteredByACLs = true + v.QueryMeta.ResultsFilteredByACLs = true } case *structs.DatacenterIndexedCheckServiceNodes: - filt.filterDatacenterCheckServiceNodes(&v.DatacenterNodes) + v.QueryMeta.ResultsFilteredByACLs = filt.filterDatacenterCheckServiceNodes(&v.DatacenterNodes) case *structs.IndexedCoordinates: - filt.filterCoordinates(&v.Coordinates) + v.QueryMeta.ResultsFilteredByACLs = filt.filterCoordinates(&v.Coordinates) case *structs.IndexedHealthChecks: - filt.filterHealthChecks(&v.HealthChecks) + v.QueryMeta.ResultsFilteredByACLs = filt.filterHealthChecks(&v.HealthChecks) case *structs.IndexedIntentions: - filt.filterIntentions(&v.Intentions) + v.QueryMeta.ResultsFilteredByACLs = filt.filterIntentions(&v.Intentions) case *structs.IndexedNodeDump: - filt.filterNodeDump(&v.Dump) + v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeDump(&v.Dump) case *structs.IndexedServiceDump: - 
filt.filterServiceDump(&v.Dump) + v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceDump(&v.Dump) case *structs.IndexedNodes: - filt.filterNodes(&v.Nodes) + v.QueryMeta.ResultsFilteredByACLs = filt.filterNodes(&v.Nodes) case *structs.IndexedNodeServices: - filt.filterNodeServices(&v.NodeServices) + v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeServices(&v.NodeServices) - case **structs.NodeServiceList: - filt.filterNodeServiceList(v) + case *structs.IndexedNodeServiceList: + v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeServiceList(&v.NodeServices) case *structs.IndexedServiceNodes: - filt.filterServiceNodes(&v.ServiceNodes) + v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceNodes(&v.ServiceNodes) case *structs.IndexedServices: - filt.filterServices(v.Services, &v.EnterpriseMeta) + v.QueryMeta.ResultsFilteredByACLs = filt.filterServices(v.Services, &v.EnterpriseMeta) case *structs.IndexedSessions: - filt.filterSessions(&v.Sessions) + v.QueryMeta.ResultsFilteredByACLs = filt.filterSessions(&v.Sessions) case *structs.IndexedPreparedQueries: - filt.filterPreparedQueries(&v.Queries) + v.QueryMeta.ResultsFilteredByACLs = filt.filterPreparedQueries(&v.Queries) case **structs.PreparedQuery: filt.redactPreparedQueryTokens(v) @@ -1881,10 +1947,18 @@ func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, sub filt.filterAuthMethod(v) case *structs.IndexedServiceList: - filt.filterServiceList(&v.Services) + v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceList(&v.Services) - case *structs.GatewayServices: - filt.filterGatewayServices(v) + case *structs.IndexedGatewayServices: + v.QueryMeta.ResultsFilteredByACLs = filt.filterGatewayServices(&v.Services) + + case *structs.IndexedNodesWithGateways: + if filt.filterCheckServiceNodes(&v.Nodes) { + v.QueryMeta.ResultsFilteredByACLs = true + } + if filt.filterGatewayServices(&v.Gateways) { + v.QueryMeta.ResultsFilteredByACLs = true + } default: panic(fmt.Errorf("Unhandled type passed to 
ACL filter: %T %#v", subj, subj)) @@ -1906,6 +1980,6 @@ func filterACL(r *ACLResolver, token string, subj interface{}) error { type partitionInfoNoop struct{} -func (p *partitionInfoNoop) ExportsForPartition(partition string) acl.PartitionExports { - return acl.PartitionExports{} +func (p *partitionInfoNoop) ExportsForPartition(partition string) acl.ExportedServices { + return acl.ExportedServices{} } diff --git a/agent/consul/acl_authmethod_oss.go b/agent/consul/acl_authmethod_oss.go index 7d68a1639..b2f28da9a 100644 --- a/agent/consul/acl_authmethod_oss.go +++ b/agent/consul/acl_authmethod_oss.go @@ -1,4 +1,5 @@ -//+build !consulent +//go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index f3e7f3980..376278c83 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -700,9 +700,8 @@ func (a *ACL) tokenSetInternal(args *structs.ACLTokenSetRequest, reply *structs. token.SetHash(true) - // validate the enterprise meta - err = state.ACLTokenUpsertValidateEnterprise(token, accessorMatch) - if err != nil { + // validate the enterprise specific fields + if err = a.tokenUpsertValidateEnterprise(token, accessorMatch); err != nil { return err } @@ -1181,9 +1180,8 @@ func (a *ACL) PolicySet(args *structs.ACLPolicySetRequest, reply *structs.ACLPol return err } - // validate the enterprise meta - err = state.ACLPolicyUpsertValidateEnterprise(policy, idMatch) - if err != nil { + // validate the enterprise specific fields + if err = a.policyUpsertValidateEnterprise(policy, idMatch); err != nil { return err } @@ -1360,7 +1358,7 @@ func (a *ACL) PolicyResolve(args *structs.ACLPolicyBatchGetRequest, reply *struc } } - a.srv.setQueryMeta(&reply.QueryMeta) + a.srv.setQueryMeta(&reply.QueryMeta, args.Token) return nil } @@ -1543,8 +1541,8 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e } } - // validate the enterprise meta - if err := 
state.ACLRoleUpsertValidateEnterprise(role, existing); err != nil { + // validate the enterprise specific fields + if err := a.roleUpsertValidateEnterprise(role, existing); err != nil { return err } @@ -1761,7 +1759,7 @@ func (a *ACL) RoleResolve(args *structs.ACLRoleBatchGetRequest, reply *structs.A } } - a.srv.setQueryMeta(&reply.QueryMeta) + a.srv.setQueryMeta(&reply.QueryMeta, args.Token) return nil } diff --git a/agent/consul/acl_endpoint_oss.go b/agent/consul/acl_endpoint_oss.go index 61788f183..80cb54c80 100644 --- a/agent/consul/acl_endpoint_oss.go +++ b/agent/consul/acl_endpoint_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul @@ -7,6 +8,21 @@ import ( "github.com/hashicorp/consul/agent/structs" ) +func (a *ACL) tokenUpsertValidateEnterprise(token *structs.ACLToken, existing *structs.ACLToken) error { + state := a.srv.fsm.State() + return state.ACLTokenUpsertValidateEnterprise(token, existing) +} + +func (a *ACL) policyUpsertValidateEnterprise(policy *structs.ACLPolicy, existing *structs.ACLPolicy) error { + state := a.srv.fsm.State() + return state.ACLPolicyUpsertValidateEnterprise(policy, existing) +} + +func (a *ACL) roleUpsertValidateEnterprise(role *structs.ACLRole, existing *structs.ACLRole) error { + state := a.srv.fsm.State() + return state.ACLRoleUpsertValidateEnterprise(role, existing) +} + func (a *ACL) enterpriseAuthMethodTypeValidation(authMethodType string) error { return nil } diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index bfe8a0220..ddf00ba11 100644 --- a/agent/consul/acl_endpoint_test.go +++ b/agent/consul/acl_endpoint_test.go @@ -20,7 +20,6 @@ import ( "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" - "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" ) @@ 
-33,7 +32,7 @@ func TestACLEndpoint_BootstrapTokens(t *testing.T) { t.Parallel() dir, srv, codec := testACLServerWithConfig(t, func(c *Config) { // remove this as we are bootstrapping - c.ACLMasterToken = "" + c.ACLInitialManagementToken = "" }, false) waitForLeaderEstablishment(t, srv) @@ -4868,7 +4867,7 @@ func TestACLEndpoint_Login_jwt(t *testing.T) { acl := ACL{srv: srv} // spin up a fake oidc server - oidcServer := startSSOTestServer(t) + oidcServer := oidcauthtest.Start(t) pubKey, privKey := oidcServer.SigningKeys() type mConfig = map[string]interface{} @@ -5003,14 +5002,6 @@ func TestACLEndpoint_Login_jwt(t *testing.T) { } } -func startSSOTestServer(t *testing.T) *oidcauthtest.Server { - ports := freeport.MustTake(1) - return oidcauthtest.Start(t, oidcauthtest.WithPort( - ports[0], - func() { freeport.Return(ports) }, - )) -} - func TestACLEndpoint_Logout(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -5248,14 +5239,18 @@ func TestValidateBindingRuleBindName(t *testing.T) { } // upsertTestToken creates a token for testing purposes -func upsertTestToken(codec rpc.ClientCodec, masterToken string, datacenter string, - tokenModificationFn func(token *structs.ACLToken)) (*structs.ACLToken, error) { +func upsertTestTokenInEntMeta(codec rpc.ClientCodec, masterToken string, datacenter string, + tokenModificationFn func(token *structs.ACLToken), entMeta *structs.EnterpriseMeta) (*structs.ACLToken, error) { + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } arg := structs.ACLTokenSetRequest{ Datacenter: datacenter, ACLToken: structs.ACLToken{ - Description: "User token", - Local: false, - Policies: nil, + Description: "User token", + Local: false, + Policies: nil, + EnterpriseMeta: *entMeta, }, WriteRequest: structs.WriteRequest{Token: masterToken}, } @@ -5279,15 +5274,21 @@ func upsertTestToken(codec rpc.ClientCodec, masterToken string, datacenter strin return &out, nil } -func 
upsertTestTokenWithPolicyRules(codec rpc.ClientCodec, masterToken string, datacenter string, rules string) (*structs.ACLToken, error) { - policy, err := upsertTestPolicyWithRules(codec, masterToken, datacenter, rules) +func upsertTestToken(codec rpc.ClientCodec, masterToken string, datacenter string, + tokenModificationFn func(token *structs.ACLToken)) (*structs.ACLToken, error) { + return upsertTestTokenInEntMeta(codec, masterToken, datacenter, + tokenModificationFn, structs.DefaultEnterpriseMetaInDefaultPartition()) +} + +func upsertTestTokenWithPolicyRulesInEntMeta(codec rpc.ClientCodec, masterToken string, datacenter string, rules string, entMeta *structs.EnterpriseMeta) (*structs.ACLToken, error) { + policy, err := upsertTestPolicyWithRulesInEntMeta(codec, masterToken, datacenter, rules, entMeta) if err != nil { return nil, err } - token, err := upsertTestToken(codec, masterToken, datacenter, func(token *structs.ACLToken) { + token, err := upsertTestTokenInEntMeta(codec, masterToken, datacenter, func(token *structs.ACLToken) { token.Policies = []structs.ACLTokenPolicyLink{{ID: policy.ID}} - }) + }, entMeta) if err != nil { return nil, err } @@ -5295,6 +5296,10 @@ func upsertTestTokenWithPolicyRules(codec rpc.ClientCodec, masterToken string, d return token, nil } +func upsertTestTokenWithPolicyRules(codec rpc.ClientCodec, masterToken string, datacenter string, rules string) (*structs.ACLToken, error) { + return upsertTestTokenWithPolicyRulesInEntMeta(codec, masterToken, datacenter, rules, nil) +} + func retrieveTestTokenAccessorForSecret(codec rpc.ClientCodec, masterToken string, datacenter string, id string) (string, error) { arg := structs.ACLTokenGetRequest{ TokenID: id, @@ -5402,8 +5407,16 @@ func upsertTestPolicy(codec rpc.ClientCodec, masterToken string, datacenter stri } func upsertTestPolicyWithRules(codec rpc.ClientCodec, masterToken string, datacenter string, rules string) (*structs.ACLPolicy, error) { + return 
upsertTestPolicyWithRulesInEntMeta(codec, masterToken, datacenter, rules, structs.DefaultEnterpriseMetaInDefaultPartition()) +} + +func upsertTestPolicyWithRulesInEntMeta(codec rpc.ClientCodec, masterToken string, datacenter string, rules string, entMeta *structs.EnterpriseMeta) (*structs.ACLPolicy, error) { return upsertTestCustomizedPolicy(codec, masterToken, datacenter, func(policy *structs.ACLPolicy) { + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } policy.Rules = rules + policy.EnterpriseMeta = *entMeta }) } diff --git a/agent/consul/acl_oss.go b/agent/consul/acl_oss.go index ba24ee677..c42064fc1 100644 --- a/agent/consul/acl_oss.go +++ b/agent/consul/acl_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/acl_oss_test.go b/agent/consul/acl_oss_test.go index 773e2a04d..917696105 100644 --- a/agent/consul/acl_oss_test.go +++ b/agent/consul/acl_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/acl_replication_test.go b/agent/consul/acl_replication_test.go index 8bc1e8c24..14494292c 100644 --- a/agent/consul/acl_replication_test.go +++ b/agent/consul/acl_replication_test.go @@ -301,7 +301,7 @@ func TestACLReplication_Tokens(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -513,7 +513,7 @@ func TestACLReplication_Policies(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -633,7 +633,7 @@ func TestACLReplication_TokensRedacted(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = 
true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -783,7 +783,7 @@ func TestACLReplication_AllTypes(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() diff --git a/agent/consul/acl_server_oss.go b/agent/consul/acl_server_oss.go index 97aa4e439..a3ed18aea 100644 --- a/agent/consul/acl_server_oss.go +++ b/agent/consul/acl_server_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 61b4df204..03707c0cb 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/mitchellh/copystructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2151,128 +2151,161 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega func TestACL_filterHealthChecks(t *testing.T) { t.Parallel() - // Create some health checks. - fill := func() structs.HealthChecks { - return structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - ServiceName: "foo", + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedHealthChecks { + return &structs.IndexedHealthChecks{ + HealthChecks: structs.HealthChecks{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, }, } } - { - hc := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterHealthChecks(&hc) - if len(hc) != 0 { - t.Fatalf("bad: %#v", hc) - } - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Allowed to see the service but not the node. 
- policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) - { - hc := fill() - filt := newACLFilter(perms, nil) - filt.filterHealthChecks(&hc) - if len(hc) != 0 { - t.Fatalf("bad: %#v", hc) - } - } + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) - // Chain on access to the node. - policy, err = acl.NewPolicyFromSource(` -node "node1" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + list := makeList() + filterACLWithAuthorizer(logger, authz, list) - // Now it should go through. 
- { - hc := fill() - filt := newACLFilter(perms, nil) - filt.filterHealthChecks(&hc) - if len(hc) != 1 { - t.Fatalf("bad: %#v", hc) - } - } + require.Len(list.HealthChecks, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.HealthChecks) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.HealthChecks) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.HealthChecks) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterIntentions(t *testing.T) { t.Parallel() - assert := assert.New(t) - fill := func() structs.Intentions { - return structs.Intentions{ - &structs.Intention{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", - DestinationName: "bar", - }, - &structs.Intention{ - ID: 
"f004177f-2c28-83b7-4229-eacc25fe55d2", - DestinationName: "foo", + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedIntentions { + return &structs.IndexedIntentions{ + Intentions: structs.Intentions{ + &structs.Intention{ + ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", + DestinationName: "bar", + }, + &structs.Intention{ + ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", + DestinationName: "foo", + }, }, } } - // Try permissive filtering. - { - ixns := fill() - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterIntentions(&ixns) - assert.Len(ixns, 2) - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering. - { - ixns := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterIntentions(&ixns) - assert.Len(ixns, 0) - } + list := makeList() + filterACLWithAuthorizer(logger, acl.AllowAll(), list) - // Policy to see one - policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - assert.Nil(err) - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - assert.Nil(err) + require.Len(list.Intentions, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) - // Filter - { - ixns := fill() - filt := newACLFilter(perms, nil) - filt.filterIntentions(&ixns) - assert.Len(ixns, 1) - } + t.Run("allowed to read 1", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Intentions, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require 
:= require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Intentions) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterServices(t *testing.T) { t.Parallel() + + require := require.New(t) + // Create some services services := structs.Services{ "service1": []string{}, @@ -2282,292 +2315,543 @@ func TestACL_filterServices(t *testing.T) { // Try permissive filtering. filt := newACLFilter(acl.AllowAll(), nil) - filt.filterServices(services, nil) - if len(services) != 3 { - t.Fatalf("bad: %#v", services) - } + removed := filt.filterServices(services, nil) + require.False(removed) + require.Len(services, 3) // Try restrictive filtering. filt = newACLFilter(acl.DenyAll(), nil) - filt.filterServices(services, nil) - if len(services) != 0 { - t.Fatalf("bad: %#v", services) - } + removed = filt.filterServices(services, nil) + require.True(removed) + require.Empty(services) } func TestACL_filterServiceNodes(t *testing.T) { t.Parallel() - // Create some service nodes. - fill := func() structs.ServiceNodes { - return structs.ServiceNodes{ - &structs.ServiceNode{ - Node: "node1", - ServiceName: "foo", + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedServiceNodes { + return &structs.IndexedServiceNodes{ + ServiceNodes: structs.ServiceNodes{ + { + Node: "node1", + ServiceName: "foo", + }, }, } } - // Try permissive filtering. - { - nodes := fill() - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterServiceNodes(&nodes) - if len(nodes) != 1 { - t.Fatalf("bad: %#v", nodes) - } - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering. 
- { - nodes := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterServiceNodes(&nodes) - if len(nodes) != 0 { - t.Fatalf("bad: %#v", nodes) - } - } + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) - // Allowed to see the service but not the node. - policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) - // But with version 8 the node will block it. - { - nodes := fill() - filt := newACLFilter(perms, nil) - filt.filterServiceNodes(&nodes) - if len(nodes) != 0 { - t.Fatalf("bad: %#v", nodes) - } - } + list := makeList() + filterACLWithAuthorizer(logger, authz, list) - // Chain on access to the node. - policy, err = acl.NewPolicyFromSource(` -node "node1" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + require.Len(list.ServiceNodes, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) - // Now it should go through. 
- { - nodes := fill() - filt := newACLFilter(perms, nil) - filt.filterServiceNodes(&nodes) - if len(nodes) != 1 { - t.Fatalf("bad: %#v", nodes) - } - } + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.ServiceNodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.ServiceNodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterNodeServices(t *testing.T) { t.Parallel() - // Create some node services. - fill := func() *structs.NodeServices { - return &structs.NodeServices{ - Node: &structs.Node{ - Node: "node1", - }, - Services: map[string]*structs.NodeService{ - "foo": { - ID: "foo", - Service: "foo", - }, - }, - } - } - // Try nil, which is a possible input. - { - var services *structs.NodeServices - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterNodeServices(&services) - if services != nil { - t.Fatalf("bad: %#v", services) - } - } + logger := hclog.NewNullLogger() - // Try permissive filtering. - { - services := fill() - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterNodeServices(&services) - if len(services.Services) != 1 { - t.Fatalf("bad: %#v", services.Services) - } - } - - // Try restrictive filtering. 
- { - services := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterNodeServices(&services) - if services != nil { - t.Fatalf("bad: %#v", *services) - } - } - - // Allowed to see the service but not the node. - policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Node will block it. - { - services := fill() - filt := newACLFilter(perms, nil) - filt.filterNodeServices(&services) - if services != nil { - t.Fatalf("bad: %#v", services) - } - } - - // Chain on access to the node. - policy, err = acl.NewPolicyFromSource(` -node "node1" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Now it should go through. - { - services := fill() - filt := newACLFilter(perms, nil) - filt.filterNodeServices(&services) - if len((*services).Services) != 1 { - t.Fatalf("bad: %#v", (*services).Services) - } - } -} - -func TestACL_filterCheckServiceNodes(t *testing.T) { - t.Parallel() - // Create some nodes. - fill := func() structs.CheckServiceNodes { - return structs.CheckServiceNodes{ - structs.CheckServiceNode{ + makeList := func() *structs.IndexedNodeServices { + return &structs.IndexedNodeServices{ + NodeServices: &structs.NodeServices{ Node: &structs.Node{ Node: "node1", }, - Service: &structs.NodeService{ - ID: "foo", - Service: "foo", - }, - Checks: structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - ServiceName: "foo", + Services: map[string]*structs.NodeService{ + "foo": { + ID: "foo", + Service: "foo", }, }, }, } } - // Try permissive filtering. 
- { - nodes := fill() - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterCheckServiceNodes(&nodes) - if len(nodes) != 1 { - t.Fatalf("bad: %#v", nodes) - } - if len(nodes[0].Checks) != 1 { - t.Fatalf("bad: %#v", nodes[0].Checks) - } - } + t.Run("nil input", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering. - { - nodes := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterCheckServiceNodes(&nodes) - if len(nodes) != 0 { - t.Fatalf("bad: %#v", nodes) + list := &structs.IndexedNodeServices{ + NodeServices: nil, } - } + filterACLWithAuthorizer(logger, acl.AllowAll(), list) - // Allowed to see the service but not the node. - policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" + require.Nil(list.NodeServices) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.NodeServices.Services, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Nil(list.NodeServices) + require.True(list.QueryMeta.ResultsFilteredByACLs, 
"ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.NodeServices.Services) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Nil(list.NodeServices) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - { - nodes := fill() - filt := newACLFilter(perms, nil) - filt.filterCheckServiceNodes(&nodes) - if len(nodes) != 0 { - t.Fatalf("bad: %#v", nodes) +func TestACL_filterNodeServiceList(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedNodeServiceList { + return &structs.IndexedNodeServiceList{ + NodeServices: structs.NodeServiceList{ + Node: &structs.Node{ + Node: "node1", + }, + Services: []*structs.NodeService{ + {Service: "foo"}, + }, + }, } } - // Chain on access to the node. 
- policy, err = acl.NewPolicyFromSource(` -node "node1" { - policy = "read" + t.Run("empty NodeServices", func(t *testing.T) { + require := require.New(t) + + var list structs.IndexedNodeServiceList + filterACLWithAuthorizer(logger, acl.AllowAll(), &list) + + require.Empty(list) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.NodeServices.Services, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.NodeServices) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + 
require.NotEmpty(list.NodeServices.Node) + require.Empty(list.NodeServices.Services) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.NodeServices) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) + +func TestACL_filterGatewayServices(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedGatewayServices { + return &structs.IndexedGatewayServices{ + Services: structs.GatewayServices{ + {Service: structs.ServiceName{Name: "foo"}}, + }, + } } - // Now it should go through. 
- { - nodes := fill() - filt := newACLFilter(perms, nil) - filt.filterCheckServiceNodes(&nodes) - if len(nodes) != 1 { - t.Fatalf("bad: %#v", nodes) - } - if len(nodes[0].Checks) != 1 { - t.Fatalf("bad: %#v", nodes[0].Checks) + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Services, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Services) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + +func TestACL_filterCheckServiceNodes(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedCheckServiceNodes { + return &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Checks: structs.HealthChecks{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, + }, + }, + }, } } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + 
require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + +func TestACL_filterPreparedQueryExecuteResponse(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.PreparedQueryExecuteResponse { + return &structs.PreparedQueryExecuteResponse{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Checks: structs.HealthChecks{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, + }, + 
}, + }, + } + } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } 
func TestACL_filterServiceTopology(t *testing.T) { @@ -2732,167 +3016,222 @@ service "bar" { func TestACL_filterCoordinates(t *testing.T) { t.Parallel() - // Create some coordinates. - coords := structs.Coordinates{ - &structs.Coordinate{ - Node: "node1", - Coord: generateRandomCoordinate(), - }, - &structs.Coordinate{ - Node: "node2", - Coord: generateRandomCoordinate(), - }, + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedCoordinates { + return &structs.IndexedCoordinates{ + Coordinates: structs.Coordinates{ + {Node: "node1", Coord: generateRandomCoordinate()}, + {Node: "node2", Coord: generateRandomCoordinate()}, + }, + } } - // Try permissive filtering. - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterCoordinates(&coords) - if len(coords) != 2 { - t.Fatalf("bad: %#v", coords) - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering - filt = newACLFilter(acl.DenyAll(), nil) - filt.filterCoordinates(&coords) - if len(coords) != 0 { - t.Fatalf("bad: %#v", coords) - } + list := makeList() + filterACLWithAuthorizer(logger, acl.AllowAll(), list) + + require.Len(list.Coordinates, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read one node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Coordinates, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + 
require.Empty(list.Coordinates) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterSessions(t *testing.T) { t.Parallel() - // Create a session list. - sessions := structs.Sessions{ - &structs.Session{ - Node: "foo", - }, - &structs.Session{ - Node: "bar", - }, + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedSessions { + return &structs.IndexedSessions{ + Sessions: structs.Sessions{ + {Node: "foo"}, + {Node: "bar"}, + }, + } } - // Try permissive filtering. - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterSessions(&sessions) - if len(sessions) != 2 { - t.Fatalf("bad: %#v", sessions) - } + t.Run("all allowed", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering - filt = newACLFilter(acl.DenyAll(), nil) - filt.filterSessions(&sessions) - if len(sessions) != 0 { - t.Fatalf("bad: %#v", sessions) - } + list := makeList() + filterACLWithAuthorizer(logger, acl.AllowAll(), list) + + require.Len(list.Sessions, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("just one node's sessions allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + session "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Sessions, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Sessions) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func 
TestACL_filterNodeDump(t *testing.T) { t.Parallel() - // Create a node dump. - fill := func() structs.NodeDump { - return structs.NodeDump{ - &structs.NodeInfo{ - Node: "node1", - Services: []*structs.NodeService{ - { - ID: "foo", - Service: "foo", + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedNodeDump { + return &structs.IndexedNodeDump{ + Dump: structs.NodeDump{ + { + Node: "node1", + Services: []*structs.NodeService{ + { + ID: "foo", + Service: "foo", + }, }, - }, - Checks: []*structs.HealthCheck{ - { - Node: "node1", - CheckID: "check1", - ServiceName: "foo", + Checks: []*structs.HealthCheck{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, }, }, }, } } - // Try permissive filtering. - { - dump := fill() - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterNodeDump(&dump) - if len(dump) != 1 { - t.Fatalf("bad: %#v", dump) - } - if len(dump[0].Services) != 1 { - t.Fatalf("bad: %#v", dump[0].Services) - } - if len(dump[0].Checks) != 1 { - t.Fatalf("bad: %#v", dump[0].Checks) - } - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering. - { - dump := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterNodeDump(&dump) - if len(dump) != 0 { - t.Fatalf("bad: %#v", dump) - } - } + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) - // Allowed to see the service but not the node. - policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) - // But the node will block it. 
- { - dump := fill() - filt := newACLFilter(perms, nil) - filt.filterNodeDump(&dump) - if len(dump) != 0 { - t.Fatalf("bad: %#v", dump) - } - } + list := makeList() + filterACLWithAuthorizer(logger, authz, list) - // Chain on access to the node. - policy, err = acl.NewPolicyFromSource(` -node "node1" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + require.Len(list.Dump, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) - // Now it should go through. - { - dump := fill() - filt := newACLFilter(perms, nil) - filt.filterNodeDump(&dump) - if len(dump) != 1 { - t.Fatalf("bad: %#v", dump) - } - if len(dump[0].Services) != 1 { - t.Fatalf("bad: %#v", dump[0].Services) - } - if len(dump[0].Checks) != 1 { - t.Fatalf("bad: %#v", dump[0].Checks) - } - } + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) 
+ + require.Len(list.Dump, 1) + require.Empty(list.Dump[0].Services) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterNodes(t *testing.T) { t.Parallel() + + require := require.New(t) + // Create a nodes list. nodes := structs.Nodes{ &structs.Node{ @@ -2905,117 +3244,389 @@ func TestACL_filterNodes(t *testing.T) { // Try permissive filtering. filt := newACLFilter(acl.AllowAll(), nil) - filt.filterNodes(&nodes) - if len(nodes) != 2 { - t.Fatalf("bad: %#v", nodes) - } + removed := filt.filterNodes(&nodes) + require.False(removed) + require.Len(nodes, 2) // Try restrictive filtering filt = newACLFilter(acl.DenyAll(), nil) - filt.filterNodes(&nodes) - if len(nodes) != 0 { - t.Fatalf("bad: %#v", nodes) + removed = filt.filterNodes(&nodes) + require.True(removed) + require.Len(nodes, 0) +} + +func TestACL_filterIndexedNodesWithGateways(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedNodesWithGateways { + return &structs.IndexedNodesWithGateways{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Checks: structs.HealthChecks{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, + }, + }, + }, + Gateways: structs.GatewayServices{ + {Service: structs.ServiceNameFromString("foo")}, + {Service: structs.ServiceNameFromString("bar")}, + }, + } } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + service "bar" { + policy = "read" + } + node "node1" { + policy = 
"read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + require.Len(list.Gateways, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("not allowed to read the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + service "bar" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.Len(list.Gateways, 2) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service "bar" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.Len(list.Gateways, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("not allowed to read the other gatway service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := 
acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + require.Len(list.Gateways, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Nodes) + require.Empty(list.Gateways) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + +func TestACL_filterIndexedServiceDump(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedServiceDump { + return &structs.IndexedServiceDump{ + Dump: structs.ServiceDump{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + Service: "foo", + }, + GatewayService: &structs.GatewayService{ + Service: structs.ServiceNameFromString("foo"), + Gateway: structs.ServiceNameFromString("foo-gateway"), + }, + }, + // No node information. 
+ { + GatewayService: &structs.GatewayService{ + Service: structs.ServiceNameFromString("bar"), + Gateway: structs.ServiceNameFromString("bar-gateway"), + }, + }, + }, + } + } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service_prefix "foo" { + policy = "read" + } + service_prefix "bar" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Dump, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("not allowed to access node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service_prefix "foo" { + policy = "read" + } + service_prefix "bar" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Dump, 1) + require.Equal("bar", list.Dump[0].GatewayService.Service.Name) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("not allowed to access service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service "foo-gateway" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Dump) + 
require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("not allowed to access gateway", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service "foo" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { t.Parallel() - // Create some data. - fixture := map[string]structs.CheckServiceNodes{ - "dc1": []structs.CheckServiceNode{ - newTestMeshGatewayNode( - "dc1", "gateway1a", "1.2.3.4", 5555, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, - ), - newTestMeshGatewayNode( - "dc1", "gateway2a", "4.3.2.1", 9999, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, - ), - }, - "dc2": []structs.CheckServiceNode{ - newTestMeshGatewayNode( - "dc2", "gateway1b", "5.6.7.8", 9999, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, - ), - newTestMeshGatewayNode( - "dc2", "gateway2b", "8.7.6.5", 1111, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, - ), - }, + + logger := hclog.NewNullLogger() + + makeList := func() *structs.DatacenterIndexedCheckServiceNodes { + return &structs.DatacenterIndexedCheckServiceNodes{ + DatacenterNodes: map[string]structs.CheckServiceNodes{ + "dc1": []structs.CheckServiceNode{ + newTestMeshGatewayNode( + "dc1", "gateway1a", "1.2.3.4", 5555, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, + ), + newTestMeshGatewayNode( + "dc1", "gateway2a", "4.3.2.1", 9999, 
map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, + ), + }, + "dc2": []structs.CheckServiceNode{ + newTestMeshGatewayNode( + "dc2", "gateway1b", "5.6.7.8", 9999, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, + ), + newTestMeshGatewayNode( + "dc2", "gateway2b", "8.7.6.5", 1111, map[string]string{structs.MetaWANFederationKey: "1"}, api.HealthPassing, + ), + }, + }, + } } - fill := func(t *testing.T) map[string]structs.CheckServiceNodes { - t.Helper() - dup, err := copystructure.Copy(fixture) - require.NoError(t, err) - return dup.(map[string]structs.CheckServiceNodes) - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Try permissive filtering. - { - dcNodes := fill(t) - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterDatacenterCheckServiceNodes(&dcNodes) - require.Len(t, dcNodes, 2) - require.Equal(t, fill(t), dcNodes) - } + policy, err := acl.NewPolicyFromSource(` + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) - // Try restrictive filtering. - { - dcNodes := fill(t) - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterDatacenterCheckServiceNodes(&dcNodes) - require.Len(t, dcNodes, 0) - } + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) - var ( - policy *acl.Policy - err error - perms acl.Authorizer - ) - // Allowed to see the service but not the node. 
- policy, err = acl.NewPolicyFromSource(` - service_prefix "" { policy = "read" } - `, acl.SyntaxCurrent, nil, nil) - require.NoError(t, err) - perms, err = acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - require.NoError(t, err) + list := makeList() + filterACLWithAuthorizer(logger, authz, list) - { - dcNodes := fill(t) - filt := newACLFilter(perms, nil) - filt.filterDatacenterCheckServiceNodes(&dcNodes) - require.Len(t, dcNodes, 0) - } + require.Len(list.DatacenterNodes["dc1"], 2) + require.Len(list.DatacenterNodes["dc2"], 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) - // Allowed to see the node but not the service. - policy, err = acl.NewPolicyFromSource(` - node_prefix "" { policy = "read" } - `, acl.SyntaxCurrent, nil, nil) - require.NoError(t, err) - perms, err = acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - require.NoError(t, err) + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) - { - dcNodes := fill(t) - filt := newACLFilter(perms, nil) - filt.filterDatacenterCheckServiceNodes(&dcNodes) - require.Len(t, dcNodes, 0) - } + policy, err := acl.NewPolicyFromSource(` + service_prefix "" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) - // Allowed to see the service AND the node - policy, err = acl.NewPolicyFromSource(` - service_prefix "" { policy = "read" } - node_prefix "" { policy = "read" } - `, acl.SyntaxCurrent, nil, nil) - require.NoError(t, err) - _, err = acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - require.NoError(t, err) + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) - // Now it should go through. 
- { - dcNodes := fill(t) - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterDatacenterCheckServiceNodes(&dcNodes) - require.Len(t, dcNodes, 2) - require.Equal(t, fill(t), dcNodes) - } + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.DatacenterNodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node_prefix "" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.DatacenterNodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.DatacenterNodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_redactPreparedQueryTokens(t *testing.T) { @@ -3113,70 +3724,130 @@ func TestFilterACL_redactTokenSecrets(t *testing.T) { func TestACL_filterPreparedQueries(t *testing.T) { t.Parallel() - queries := structs.PreparedQueries{ - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", - Name: "query-with-no-token", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d3", - Name: "query-with-a-token", - Token: "root", - }, + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedPreparedQueries { + return &structs.IndexedPreparedQueries{ + Queries: structs.PreparedQueries{ + {ID: 
"f004177f-2c28-83b7-4229-eacc25fe55d1"}, + { + ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", + Name: "query-with-no-token", + }, + { + ID: "f004177f-2c28-83b7-4229-eacc25fe55d3", + Name: "query-with-a-token", + Token: "root", + }, + }, + } } - expected := structs.PreparedQueries{ - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", - Name: "query-with-no-token", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d3", - Name: "query-with-a-token", - Token: "root", - }, + t.Run("management token", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.ManageAll(), list) + + // Check we get the un-named query. + require.Len(list.Queries, 3) + + // Check we get the un-redacted token. + require.Equal("root", list.Queries[2].Token) + + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("permissive filtering", func(t *testing.T) { + require := require.New(t) + + list := makeList() + queryWithToken := list.Queries[2] + + filterACLWithAuthorizer(logger, acl.AllowAll(), list) + + // Check the un-named query is filtered out. + require.Len(list.Queries, 2) + + // Check the token is redacted. + require.Equal(redactedToken, list.Queries[1].Token) + + // Check the original object is unmodified. + require.Equal("root", queryWithToken.Token) + + // ResultsFilteredByACLs should not include un-named queries, which are only + // readable by a management token. 
+ require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("limited access", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + query "query-with-a-token" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + // Check we only get the query we have access to. + require.Len(list.Queries, 1) + + // Check the token is redacted. + require.Equal(redactedToken, list.Queries[0].Token) + + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("restrictive filtering", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Queries) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + +func TestACL_filterServiceList(t *testing.T) { + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedServiceList { + return &structs.IndexedServiceList{ + Services: structs.ServiceList{ + {Name: "foo"}, + {Name: "bar"}, + }, + } } - // Try permissive filtering with a management token. This will allow the - // embedded token to be seen. - filt := newACLFilter(acl.ManageAll(), nil) - filt.filterPreparedQueries(&queries) - if !reflect.DeepEqual(queries, expected) { - t.Fatalf("bad: %#v", queries) - } + t.Run("permissive filtering", func(t *testing.T) { + require := require.New(t) - // Hang on to the entry with a token, which needs to survive the next - // operation. 
- original := queries[2] + list := makeList() + filterACLWithAuthorizer(logger, acl.AllowAll(), list) - // Now try permissive filtering with a client token, which should cause - // the embedded token to get redacted, and the query with no name to get - // filtered out. - filt = newACLFilter(acl.AllowAll(), nil) - filt.filterPreparedQueries(&queries) - expected[2].Token = redactedToken - expected = append(structs.PreparedQueries{}, expected[1], expected[2]) - if !reflect.DeepEqual(queries, expected) { - t.Fatalf("bad: %#v", queries) - } + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + require.Len(list.Services, 2) + }) - // Make sure that the original object didn't lose its token. - if original.Token != "root" { - t.Fatalf("bad token: %s", original.Token) - } + t.Run("restrictive filtering", func(t *testing.T) { + require := require.New(t) - // Now try restrictive filtering. - filt = newACLFilter(acl.DenyAll(), nil) - filt.filterPreparedQueries(&queries) - if len(queries) != 0 { - t.Fatalf("bad: %#v", queries) - } + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + require.Empty(list.Services) + }) } func TestACL_unhandledFilterType(t *testing.T) { @@ -3349,7 +4020,7 @@ func TestACLResolver_AgentMaster(t *testing.T) { cfg.DisableDuration = 0 }) - tokens.UpdateAgentMasterToken("9a184a11-5599-459e-b71a-550e5f9a5a23", token.TokenSourceConfig) + tokens.UpdateAgentRecoveryToken("9a184a11-5599-459e-b71a-550e5f9a5a23", token.TokenSourceConfig) ident, authz, err := r.ResolveTokenToIdentityAndAuthorizer("9a184a11-5599-459e-b71a-550e5f9a5a23") require.NoError(t, err) diff --git a/agent/consul/acl_token_exp_test.go b/agent/consul/acl_token_exp_test.go index 6bb3f6ce9..17e8622c1 100644 --- a/agent/consul/acl_token_exp_test.go +++ b/agent/consul/acl_token_exp_test.go @@ -44,7 +44,7 @@ func 
testACLTokenReap_Primary(t *testing.T, local, global bool) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLTokenMinExpirationTTL = 10 * time.Millisecond c.ACLTokenMaxExpirationTTL = 8 * time.Second }) diff --git a/agent/consul/authmethod/authmethods_oss.go b/agent/consul/authmethod/authmethods_oss.go index ca0b73046..0e5cc5a7f 100644 --- a/agent/consul/authmethod/authmethods_oss.go +++ b/agent/consul/authmethod/authmethods_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package authmethod diff --git a/agent/consul/authmethod/kubeauth/k8s_oss.go b/agent/consul/authmethod/kubeauth/k8s_oss.go index f87ca502f..b3d74361e 100644 --- a/agent/consul/authmethod/kubeauth/k8s_oss.go +++ b/agent/consul/authmethod/kubeauth/k8s_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package kubeauth diff --git a/agent/consul/authmethod/kubeauth/testing.go b/agent/consul/authmethod/kubeauth/testing.go index 7a61804fd..4b15378fd 100644 --- a/agent/consul/authmethod/kubeauth/testing.go +++ b/agent/consul/authmethod/kubeauth/testing.go @@ -4,20 +4,16 @@ import ( "bytes" "encoding/json" "encoding/pem" - "fmt" "io/ioutil" "log" - "net" "net/http" "net/http/httptest" "net/url" "regexp" - "strconv" "strings" "sync" "time" - "github.com/hashicorp/consul/sdk/freeport" "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" authv1 "k8s.io/api/authentication/v1" @@ -33,9 +29,8 @@ import ( // - GET /api/v1/namespaces//serviceaccounts/ // type TestAPIServer struct { - srv *httptest.Server - caCert string - returnFunc func() + srv *httptest.Server + caCert string mu sync.Mutex authorizedJWT string // token review and sa read @@ -48,12 +43,7 @@ type TestAPIServer struct { // random free port. 
func StartTestAPIServer(t testing.T) *TestAPIServer { s := &TestAPIServer{} - - ports := freeport.MustTake(1) - s.returnFunc = func() { - freeport.Return(ports) - } - s.srv = httptestNewUnstartedServerWithPort(s, ports[0]) + s.srv = httptest.NewUnstartedServer(s) s.srv.Config.ErrorLog = log.New(ioutil.Discard, "", 0) s.srv.StartTLS() @@ -101,10 +91,6 @@ func (s *TestAPIServer) SetAllowedServiceAccount( // Stop stops the running TestAPIServer. func (s *TestAPIServer) Stop() { s.srv.Close() - if s.returnFunc != nil { - s.returnFunc() - s.returnFunc = nil - } } // Addr returns the current base URL for the running webserver. @@ -545,19 +531,3 @@ func createStatus(status, message string, reason metav1.StatusReason, details *m Code: code, } } - -func httptestNewUnstartedServerWithPort(handler http.Handler, port int) *httptest.Server { - if port == 0 { - return httptest.NewUnstartedServer(handler) - } - addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(port)) - l, err := net.Listen("tcp", addr) - if err != nil { - panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) - } - - return &httptest.Server{ - Listener: l, - Config: &http.Server{Handler: handler}, - } -} diff --git a/agent/consul/authmethod/ssoauth/sso_oss.go b/agent/consul/authmethod/ssoauth/sso_oss.go index 358d1eff5..2f6bbe12a 100644 --- a/agent/consul/authmethod/ssoauth/sso_oss.go +++ b/agent/consul/authmethod/ssoauth/sso_oss.go @@ -1,4 +1,5 @@ -//+build !consulent +//go:build !consulent +// +build !consulent package ssoauth diff --git a/agent/consul/authmethod/ssoauth/sso_test.go b/agent/consul/authmethod/ssoauth/sso_test.go index 1623f0904..39501b026 100644 --- a/agent/consul/authmethod/ssoauth/sso_test.go +++ b/agent/consul/authmethod/ssoauth/sso_test.go @@ -5,14 +5,14 @@ import ( "testing" "time" - "github.com/hashicorp/consul/agent/consul/authmethod" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" - 
"github.com/hashicorp/consul/sdk/freeport" - "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" "gopkg.in/square/go-jose.v2/jwt" + + "github.com/hashicorp/consul/agent/consul/authmethod" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" + "github.com/hashicorp/consul/sdk/testutil" ) func TestJWT_NewValidator(t *testing.T) { @@ -32,7 +32,7 @@ func TestJWT_NewValidator(t *testing.T) { return method } - oidcServer := startTestServer(t) + oidcServer := oidcauthtest.Start(t) // Note that we won't test ALL of the available config variations here. // The go-sso library has exhaustive tests. @@ -110,7 +110,7 @@ func TestJWT_ValidateLogin(t *testing.T) { return v } - oidcServer := startTestServer(t) + oidcServer := oidcauthtest.Start(t) pubKey, privKey := oidcServer.SigningKeys() cases := map[string]struct { @@ -260,11 +260,3 @@ func kv(a ...string) map[string]string { } return m } - -func startTestServer(t *testing.T) *oidcauthtest.Server { - ports := freeport.MustTake(1) - return oidcauthtest.Start(t, oidcauthtest.WithPort( - ports[0], - func() { freeport.Return(ports) }, - )) -} diff --git a/agent/consul/authmethod/testauth/testing_oss.go b/agent/consul/authmethod/testauth/testing_oss.go index 5ae0c8124..a3c9b4382 100644 --- a/agent/consul/authmethod/testauth/testing_oss.go +++ b/agent/consul/authmethod/testauth/testing_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package testauth diff --git a/agent/consul/auto_config_backend.go b/agent/consul/auto_config_backend.go index 3274000d1..aef6ad7ba 100644 --- a/agent/consul/auto_config_backend.go +++ b/agent/consul/auto_config_backend.go @@ -31,13 +31,16 @@ func (b autoConfigBackend) GetCARoots() (*structs.IndexedCARoots, error) { // DatacenterJoinAddresses will return all the strings suitable for usage in // retry join operations to connect to the the LAN or LAN segment gossip 
pool. -func (b autoConfigBackend) DatacenterJoinAddresses(segment string) ([]string, error) { +func (b autoConfigBackend) DatacenterJoinAddresses(partition, segment string) ([]string, error) { members, err := b.Server.LANMembers(LANMemberFilter{ Segment: segment, - Partition: "", // TODO(partitions): figure out what goes here + Partition: partition, }) if err != nil { - return nil, fmt.Errorf("Failed to retrieve members for segment %s - %w", segment, err) + if segment != "" { + return nil, fmt.Errorf("Failed to retrieve members for segment %s: %w", segment, err) + } + return nil, fmt.Errorf("Failed to retrieve members for partition %s: %w", structs.PartitionOrDefault(partition), err) } var joinAddrs []string diff --git a/agent/consul/auto_config_backend_test.go b/agent/consul/auto_config_backend_test.go index 2e82e8882..f5078494b 100644 --- a/agent/consul/auto_config_backend_test.go +++ b/agent/consul/auto_config_backend_test.go @@ -27,7 +27,7 @@ func TestAutoConfigBackend_DatacenterJoinAddresses(t *testing.T) { } backend := autoConfigBackend{Server: nodes.Servers[0]} - actual, err := backend.DatacenterJoinAddresses("") + actual, err := backend.DatacenterJoinAddresses("", "") require.NoError(t, err) require.ElementsMatch(t, expected, actual) } diff --git a/agent/consul/auto_config_endpoint.go b/agent/consul/auto_config_endpoint.go index cbcf7ac85..c0b92ec67 100644 --- a/agent/consul/auto_config_endpoint.go +++ b/agent/consul/auto_config_endpoint.go @@ -25,11 +25,16 @@ import ( type AutoConfigOptions struct { NodeName string SegmentName string + Partition string CSR *x509.CertificateRequest SpiffeID *connect.SpiffeIDAgent } +func (opts AutoConfigOptions) PartitionOrDefault() string { + return structs.PartitionOrDefault(opts.Partition) +} + type AutoConfigAuthorizer interface { // Authorizes the request and returns a struct containing the various // options for how to generate the configuration. 
@@ -57,8 +62,9 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig } varMap := map[string]string{ - "node": req.Node, - "segment": req.Segment, + "node": req.Node, + "segment": req.Segment, + "partition": req.PartitionOrDefault(), } for _, raw := range a.claimAssertions { @@ -86,6 +92,7 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig opts := AutoConfigOptions{ NodeName: req.Node, SegmentName: req.Segment, + Partition: req.Partition, } if req.CSR != "" { @@ -94,8 +101,12 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig return AutoConfigOptions{}, err } - if id.Agent != req.Node { - return AutoConfigOptions{}, fmt.Errorf("Spiffe ID agent name (%s) of the certificate signing request is not for the correct node (%s)", id.Agent, req.Node) + if id.Agent != req.Node || !structs.EqualPartitions(id.Partition, req.Partition) { + return AutoConfigOptions{}, + fmt.Errorf("Spiffe ID agent name (%s) of the certificate signing request is not for the correct node (%s)", + printNodeName(id.Agent, id.Partition), + printNodeName(req.Node, req.Partition), + ) } opts.CSR = csr @@ -107,7 +118,7 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig type AutoConfigBackend interface { CreateACLToken(template *structs.ACLToken) (*structs.ACLToken, error) - DatacenterJoinAddresses(segment string) ([]string, error) + DatacenterJoinAddresses(partition, segment string) ([]string, error) ForwardRPC(method string, info structs.RPCInfo, reply interface{}) (bool, error) GetCARoots() (*structs.IndexedCARoots, error) SignCertificate(csr *x509.CertificateRequest, id connect.CertURI) (*structs.IssuedCert, error) @@ -200,7 +211,7 @@ func (ac *AutoConfig) updateACLsInConfig(opts AutoConfigOptions, resp *pbautocon if ac.config.ACLsEnabled { // set up the token template - the ids and create template := structs.ACLToken{ - Description: fmt.Sprintf("Auto Config Token for Node 
%q", opts.NodeName), + Description: fmt.Sprintf("Auto Config Token for Node %q", printNodeName(opts.NodeName, opts.Partition)), Local: true, NodeIdentities: []*structs.ACLNodeIdentity{ { @@ -208,13 +219,12 @@ func (ac *AutoConfig) updateACLsInConfig(opts AutoConfigOptions, resp *pbautocon Datacenter: ac.config.Datacenter, }, }, - // TODO(partitions): support auto-config in different partitions - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(opts.PartitionOrDefault()), } token, err := ac.backend.CreateACLToken(&template) if err != nil { - return fmt.Errorf("Failed to generate an ACL token for node %q - %w", opts.NodeName, err) + return fmt.Errorf("Failed to generate an ACL token for node %q: %w", printNodeName(opts.NodeName, opts.Partition), err) } acl.Tokens = &pbconfig.ACLTokens{Agent: token.SecretID} @@ -227,7 +237,7 @@ func (ac *AutoConfig) updateACLsInConfig(opts AutoConfigOptions, resp *pbautocon // updateJoinAddressesInConfig determines the correct gossip endpoints that clients should // be connecting to for joining the cluster based on the segment given in the opts parameter. 
func (ac *AutoConfig) updateJoinAddressesInConfig(opts AutoConfigOptions, resp *pbautoconf.AutoConfigResponse) error { - joinAddrs, err := ac.backend.DatacenterJoinAddresses(opts.SegmentName) + joinAddrs, err := ac.backend.DatacenterJoinAddresses(opts.Partition, opts.SegmentName) if err != nil { return err } @@ -299,6 +309,7 @@ func (ac *AutoConfig) baseConfig(opts AutoConfigOptions, resp *pbautoconf.AutoCo resp.Config.PrimaryDatacenter = ac.config.PrimaryDatacenter resp.Config.NodeName = opts.NodeName resp.Config.SegmentName = opts.SegmentName + resp.Config.Partition = opts.Partition return nil } @@ -422,3 +433,10 @@ func mapstructureTranslateToProtobuf(in interface{}, out interface{}) error { return decoder.Decode(in) } + +func printNodeName(nodeName, partition string) string { + if structs.IsDefaultPartition(partition) { + return nodeName + } + return partition + "/" + nodeName +} diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index 58b8c063f..f082a70ed 100644 --- a/agent/consul/auto_config_endpoint_test.go +++ b/agent/consul/auto_config_endpoint_test.go @@ -38,8 +38,8 @@ func (m *mockAutoConfigBackend) CreateACLToken(template *structs.ACLToken) (*str return token, ret.Error(1) } -func (m *mockAutoConfigBackend) DatacenterJoinAddresses(segment string) ([]string, error) { - ret := m.Called(segment) +func (m *mockAutoConfigBackend) DatacenterJoinAddresses(partition, segment string) ([]string, error) { + ret := m.Called(partition, segment) // this handles converting an untyped nil to a typed nil addrs, _ := ret.Get(0).([]string) return addrs, ret.Error(1) @@ -215,6 +215,8 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { err string } + defaultEntMeta := structs.DefaultEnterpriseMetaInDefaultPartition() + cases := map[string]testCase{ "wrong-datacenter": { request: pbautoconf.AutoConfigRequest{ @@ -304,6 +306,7 @@ func TestAutoConfigInitialConfiguration(t *testing.T) { expectedID := connect.SpiffeIDAgent{ 
Host: roots.TrustDomain, Agent: "test-node", + Partition: defaultEntMeta.PartitionOrDefault(), Datacenter: "dc1", } @@ -836,7 +839,7 @@ func TestAutoConfig_updateACLsInConfig(t *testing.T) { func TestAutoConfig_updateJoinAddressesInConfig(t *testing.T) { addrs := []string{"198.18.0.7:8300", "198.18.0.1:8300"} backend := &mockAutoConfigBackend{} - backend.On("DatacenterJoinAddresses", "").Return(addrs, nil).Once() + backend.On("DatacenterJoinAddresses", "", "").Return(addrs, nil).Once() ac := AutoConfig{backend: backend} diff --git a/agent/consul/autopilot_oss.go b/agent/consul/autopilot_oss.go index 09a1f76fc..d84cd2fdf 100644 --- a/agent/consul/autopilot_oss.go +++ b/agent/consul/autopilot_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index a48a8133a..4de46558a 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -71,76 +71,8 @@ type Catalog struct { logger hclog.Logger } -// nodePreApply does the verification of a node before it is applied to Raft. -func nodePreApply(nodeName, nodeID string) error { - if nodeName == "" { - return fmt.Errorf("Must provide node") - } - if nodeID != "" { - if _, err := uuid.ParseUUID(nodeID); err != nil { - return fmt.Errorf("Bad node ID: %v", err) - } - } - - return nil -} - -func servicePreApply(service *structs.NodeService, authz acl.Authorizer, authzCtxFill func(*acl.AuthorizerContext)) error { - // Validate the service. This is in addition to the below since - // the above just hasn't been moved over yet. We should move it over - // in time. - if err := service.Validate(); err != nil { - return err - } - - // If no service id, but service name, use default - if service.ID == "" && service.Service != "" { - service.ID = service.Service - } - - // Verify ServiceName provided if ID. 
- if service.ID != "" && service.Service == "" { - return fmt.Errorf("Must provide service name with ID") - } - - // Check the service address here and in the agent endpoint - // since service registration isn't synchronous. - if ipaddr.IsAny(service.Address) { - return fmt.Errorf("Invalid service address") - } - - var authzContext acl.AuthorizerContext - authzCtxFill(&authzContext) - - // Apply the ACL policy if any. The 'consul' service is excluded - // since it is managed automatically internally (that behavior - // is going away after version 0.8). We check this same policy - // later if version 0.8 is enabled, so we can eventually just - // delete this and do all the ACL checks down there. - if service.Service != structs.ConsulServiceName { - if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow { - return acl.ErrPermissionDenied - } - } - - // Proxies must have write permission on their destination - if service.Kind == structs.ServiceKindConnectProxy { - if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow { - return acl.ErrPermissionDenied - } - } - - return nil -} - -// checkPreApply does the verification of a check before it is applied to Raft. -func checkPreApply(check *structs.HealthCheck) { - if check.CheckID == "" && check.Name != "" { - check.CheckID = types.CheckID(check.Name) - } -} - -// Register is used register that a node is providing a given service. +// Register a service and/or check(s) in a node, creating the node if it doesn't exist. +// It is valid to pass no service or checks to simply create the node itself. func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error { if done, err := c.srv.ForwardRPC("Catalog.Register", args, reply); done { return err @@ -212,6 +144,75 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error return err } +// nodePreApply does the verification of a node before it is applied to Raft. 
+func nodePreApply(nodeName, nodeID string) error { + if nodeName == "" { + return fmt.Errorf("Must provide node") + } + if nodeID != "" { + if _, err := uuid.ParseUUID(nodeID); err != nil { + return fmt.Errorf("Bad node ID: %v", err) + } + } + + return nil +} + +func servicePreApply(service *structs.NodeService, authz acl.Authorizer, authzCtxFill func(*acl.AuthorizerContext)) error { + // Validate the service. This is in addition to the below since + // the above just hasn't been moved over yet. We should move it over + // in time. + if err := service.Validate(); err != nil { + return err + } + + // If no service id, but service name, use default + if service.ID == "" && service.Service != "" { + service.ID = service.Service + } + + // Verify ServiceName provided if ID. + if service.ID != "" && service.Service == "" { + return fmt.Errorf("Must provide service name with ID") + } + + // Check the service address here and in the agent endpoint + // since service registration isn't synchronous. + if ipaddr.IsAny(service.Address) { + return fmt.Errorf("Invalid service address") + } + + var authzContext acl.AuthorizerContext + authzCtxFill(&authzContext) + + // Apply the ACL policy if any. The 'consul' service is excluded + // since it is managed automatically internally (that behavior + // is going away after version 0.8). We check this same policy + // later if version 0.8 is enabled, so we can eventually just + // delete this and do all the ACL checks down there. 
+ if service.Service != structs.ConsulServiceName { + if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow { + return acl.ErrPermissionDenied + } + } + + // Proxies must have write permission on their destination + if service.Kind == structs.ServiceKindConnectProxy { + if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow { + return acl.ErrPermissionDenied + } + } + + return nil +} + +// checkPreApply does the verification of a check before it is applied to Raft. +func checkPreApply(check *structs.HealthCheck) { + if check.CheckID == "" && check.Name != "" { + check.CheckID = types.CheckID(check.Name) + } +} + // vetRegisterWithACL applies the given ACL's policy to the catalog update and // determines if it is allowed. Since the catalog register request is so // dynamic, this is a pretty complex algorithm and was worth breaking out of the @@ -330,7 +331,13 @@ func vetRegisterWithACL( return nil } -// Deregister is used to remove a service registration for a given node. +// Deregister a service or check in a node, or the entire node itself. +// +// If a ServiceID is provided in the request, any associated Checks +// with that service are also deregistered. +// +// If a ServiceID or CheckID is not provided in the request, the entire +// node is deregistered. func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) error { if done, err := c.srv.ForwardRPC("Catalog.Deregister", args, reply); done { return err @@ -458,7 +465,7 @@ func (c *Catalog) ListDatacenters(args *structs.DatacentersRequest, reply *[]str return nil } -// ListNodes is used to query the nodes in a DC +// ListNodes is used to query the nodes in a DC. 
func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.IndexedNodes) error { if done, err := c.srv.ForwardRPC("Catalog.ListNodes", args, reply); done { return err @@ -488,16 +495,19 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde return nil } - if err := c.srv.filterACL(args.Token, reply); err != nil { - return err - } - raw, err := filter.Execute(reply.Nodes) if err != nil { return err } reply.Nodes = raw.(structs.Nodes) + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + return c.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes) }) } @@ -506,7 +516,8 @@ func isUnmodified(opts structs.QueryOptions, index uint64) bool { return opts.AllowNotModifiedResponse && opts.MinQueryIndex > 0 && opts.MinQueryIndex == index } -// ListServices is used to query the services in a DC +// ListServices is used to query the services in a DC. +// Returns services as a map of service names to available tags. func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error { if done, err := c.srv.ForwardRPC("Catalog.ListServices", args, reply); done { return err @@ -549,6 +560,8 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I }) } +// ServiceList is used to query the services in a DC. +// Returns services as a list of ServiceNames. 
func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error { if done, err := c.srv.ForwardRPC("Catalog.ServiceList", args, reply); done { return err @@ -567,7 +580,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.ServiceList(ws, nil, &args.EnterpriseMeta) + index, services, err := state.ServiceList(ws, &args.EnterpriseMeta) if err != nil { return err } @@ -578,7 +591,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In }) } -// ServiceNodes returns all the nodes registered as part of a service +// ServiceNodes returns all the nodes registered as part of a service. func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceNodes) error { if done, err := c.srv.ForwardRPC("Catalog.ServiceNodes", args, reply); done { return err @@ -664,18 +677,20 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru reply.ServiceNodes = filtered } - if err := c.srv.filterACL(args.Token, reply); err != nil { - return err - } - // This is safe to do even when the filter is nil - its just a no-op then raw, err := filter.Execute(reply.ServiceNodes) if err != nil { return err } - reply.ServiceNodes = raw.(structs.ServiceNodes) + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. 
+ if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + return c.srv.sortNodesByDistanceFrom(args.Source, reply.ServiceNodes) }) @@ -716,7 +731,8 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru return err } -// NodeServices returns all the services registered as part of a node +// NodeServices returns all the services registered as part of a node. +// Returns NodeServices as a map of service IDs to services. func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServices) error { if done, err := c.srv.ForwardRPC("Catalog.NodeServices", args, reply); done { return err @@ -750,11 +766,7 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs if err != nil { return err } - reply.Index, reply.NodeServices = index, services - if err := c.srv.filterACL(args.Token, reply); err != nil { - return err - } if reply.NodeServices != nil { raw, err := filter.Execute(reply.NodeServices.Services) @@ -764,10 +776,19 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs reply.NodeServices.Services = raw.(map[string]*structs.NodeService) } + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }) } +// NodeServiceList returns all the services registered as part of a node. +// Returns NodeServices as a list of services. 
func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServiceList) error { if done, err := c.srv.ForwardRPC("Catalog.NodeServiceList", args, reply); done { return err @@ -802,11 +823,8 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru return err } - if err := c.srv.filterACL(args.Token, &services); err != nil { - return err - } - reply.Index = index + if services != nil { reply.NodeServices = *services @@ -817,6 +835,13 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru reply.NodeServices.Services = raw.([]*structs.NodeService) } + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }) } @@ -876,12 +901,35 @@ func (c *Catalog) GatewayServices(args *structs.ServiceSpecificRequest, reply *s if err != nil { return err } + reply.Index, reply.Services = index, services - if err := c.srv.filterACL(args.Token, &services); err != nil { + if err := c.srv.filterACL(args.Token, reply); err != nil { return err } - - reply.Index, reply.Services = index, services return nil }) } + +func (c *Catalog) VirtualIPForService(args *structs.ServiceSpecificRequest, reply *string) error { + if done, err := c.srv.ForwardRPC("Catalog.VirtualIPForService", args, reply); done { + return err + } + + var authzContext acl.AuthorizerContext + authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext) + if err != nil { + return err + } + + if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + + if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow { + return acl.ErrPermissionDenied + } + + state := c.srv.fsm.State() + *reply, 
err = state.VirtualIPForService(structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta)) + return err +} diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index 4433274ec..8b2e9101e 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -184,7 +184,7 @@ func TestCatalog_Register_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -263,10 +263,10 @@ node "foo" { func createToken(t *testing.T, cc rpc.ClientCodec, policyRules string) string { t.Helper() - return createTokenWithPolicyName(t, "the-policy", cc, policyRules) + return createTokenWithPolicyName(t, cc, "the-policy", policyRules, "root") } -func createTokenWithPolicyName(t *testing.T, policyName string, cc rpc.ClientCodec, policyRules string) string { +func createTokenWithPolicyName(t *testing.T, cc rpc.ClientCodec, policyName string, policyRules string, token string) string { t.Helper() reqPolicy := structs.ACLPolicySetRequest{ @@ -275,25 +275,25 @@ func createTokenWithPolicyName(t *testing.T, policyName string, cc rpc.ClientCod Name: policyName, Rules: policyRules, }, - WriteRequest: structs.WriteRequest{Token: "root"}, + WriteRequest: structs.WriteRequest{Token: token}, } err := msgpackrpc.CallWithCodec(cc, "ACL.PolicySet", &reqPolicy, &structs.ACLPolicy{}) require.NoError(t, err) - token, err := uuid.GenerateUUID() + secretId, err := uuid.GenerateUUID() require.NoError(t, err) reqToken := structs.ACLTokenSetRequest{ Datacenter: "dc1", ACLToken: structs.ACLToken{ - SecretID: token, + SecretID: secretId, Policies: []structs.ACLTokenPolicyLink{{Name: policyName}}, }, - WriteRequest: structs.WriteRequest{Token: "root"}, + WriteRequest: structs.WriteRequest{Token: token}, } err = 
msgpackrpc.CallWithCodec(cc, "ACL.TokenSet", &reqToken, &structs.ACLToken{}) require.NoError(t, err) - return token + return secretId } func TestCatalog_Register_ForwardLeader(t *testing.T) { @@ -452,7 +452,7 @@ func TestCatalog_Register_ConnectProxy_ACLDestinationServiceName(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -570,7 +570,7 @@ func TestCatalog_Deregister_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1297,7 +1297,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1307,42 +1307,48 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1") - // We scope the reply in each of these since msgpack won't clear out an - // existing slice if the incoming one is nil, so it's best to start - // clean each time. + token := func(policy string) string { + rules := fmt.Sprintf( + `node "%s" { policy = "%s" }`, + s1.config.NodeName, + policy, + ) + return createTokenWithPolicyName(t, codec, policy, rules, "root") + } - // The node policy should not be ignored. 
args := structs.DCSpecificRequest{ Datacenter: "dc1", } - { - reply := structs.IndexedNodes{} + + t.Run("deny", func(t *testing.T) { + args.Token = token("deny") + + var reply structs.IndexedNodes if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { t.Fatalf("err: %v", err) } if len(reply.Nodes) != 0 { t.Fatalf("bad: %v", reply.Nodes) } - } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + }) - rules := fmt.Sprintf(` -node "%s" { - policy = "read" -} -`, s1.config.NodeName) - id := createToken(t, codec, rules) + t.Run("allow", func(t *testing.T) { + args.Token = token("read") - // Now try with the token and it will go through. - args.Token = id - { - reply := structs.IndexedNodes{} + var reply structs.IndexedNodes if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { t.Fatalf("err: %v", err) } if len(reply.Nodes) != 1 { t.Fatalf("bad: %v", reply.Nodes) } - } + if reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should not true") + } + }) } func Benchmark_Catalog_ListNodes(t *testing.B) { @@ -1422,6 +1428,7 @@ func TestCatalog_ListServices(t *testing.T) { t.Fatalf("bad: %v", out) } require.False(t, out.QueryMeta.NotModified) + require.False(t, out.QueryMeta.ResultsFilteredByACLs) t.Run("with option AllowNotModifiedResponse", func(t *testing.T) { args.QueryOptions = structs.QueryOptions{ @@ -2402,7 +2409,7 @@ func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2474,6 +2481,7 @@ node "foo" { require.Len(t, resp.ServiceNodes, 1) v := resp.ServiceNodes[0] require.Equal(t, "foo-proxy", v.ServiceName) + require.True(t, resp.QueryMeta.ResultsFilteredByACLs, 
"ResultsFilteredByACLs should be true") } func TestCatalog_ListServiceNodes_ConnectNative(t *testing.T) { @@ -2691,7 +2699,7 @@ func testACLFilterServer(t *testing.T) (dir, token string, srv *Server, codec rp dir, srv = testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) @@ -2742,6 +2750,7 @@ node_prefix "" { CheckID: "service:bar", Name: "service:bar", ServiceID: "bar", + Status: api.HealthPassing, }, WriteRequest: structs.WriteRequest{Token: "root"}, } @@ -2777,6 +2786,9 @@ func TestCatalog_ListServices_FilterACL(t *testing.T) { if _, ok := reply.Services["bar"]; ok { t.Fatalf("bad: %#v", reply.Services) } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } } func TestCatalog_ServiceNodes_FilterACL(t *testing.T) { @@ -2825,6 +2837,7 @@ func TestCatalog_ServiceNodes_FilterACL(t *testing.T) { t.Fatalf("bad: %#v", reply.ServiceNodes) } } + require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. This also proves @@ -2833,7 +2846,7 @@ func TestCatalog_ServiceNodes_FilterACL(t *testing.T) { // for now until we change the sense of the version 8 ACL flag). 
} -func TestCatalog_NodeServices_ACLDeny(t *testing.T) { +func TestCatalog_NodeServices_ACL(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -2842,7 +2855,7 @@ func TestCatalog_NodeServices_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2852,43 +2865,46 @@ func TestCatalog_NodeServices_ACLDeny(t *testing.T) { testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) - // The node policy should not be ignored. + token := func(policy string) string { + rules := fmt.Sprintf(` + node "%s" { policy = "%s" } + service "consul" { policy = "%s" } + `, + s1.config.NodeName, + policy, + policy, + ) + return createTokenWithPolicyName(t, codec, policy, rules, "root") + } + args := structs.NodeSpecificRequest{ Datacenter: "dc1", Node: s1.config.NodeName, } - reply := structs.IndexedNodeServices{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply.NodeServices != nil { - t.Fatalf("should not nil") - } - rules := fmt.Sprintf(` -node "%s" { - policy = "read" -} -`, s1.config.NodeName) - id := createToken(t, codec, rules) + t.Run("deny", func(t *testing.T) { + require := require.New(t) - // Now try with the token and it will go through. - args.Token = id - if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply.NodeServices == nil { - t.Fatalf("should not be nil") - } + args.Token = token("deny") - // Make sure an unknown node doesn't cause trouble. 
- args.Node = "nope" - if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply.NodeServices != nil { - t.Fatalf("should not nil") - } + var reply structs.IndexedNodeServices + err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply) + require.NoError(err) + require.Nil(reply.NodeServices) + require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allow", func(t *testing.T) { + require := require.New(t) + + args.Token = token("read") + + var reply structs.IndexedNodeServices + err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply) + require.NoError(err) + require.NotNil(reply.NodeServices) + require.False(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) } func TestCatalog_NodeServices_FilterACL(t *testing.T) { @@ -3242,7 +3258,7 @@ func TestCatalog_GatewayServices_ACLFiltering(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -3379,6 +3395,7 @@ service "gateway" { var resp structs.IndexedGatewayServices assert.Nil(r, msgpackrpc.CallWithCodec(codec, "Catalog.GatewayServices", &req, &resp)) assert.Len(r, resp.Services, 0) + assert.True(r, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) rules = ` @@ -3402,6 +3419,7 @@ service "gateway" { var resp structs.IndexedGatewayServices assert.Nil(r, msgpackrpc.CallWithCodec(codec, "Catalog.GatewayServices", &req, &resp)) assert.Len(r, resp.Services, 2) + assert.True(r, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") expect := structs.GatewayServices{ { @@ -3905,3 +3923,100 @@ node "node" { }) } } + +func TestCatalog_VirtualIPForService(t 
*testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.Build = "1.11.0" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + err := s1.fsm.State().EnsureRegistration(1, &structs.RegisterRequest{ + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "api", + Connect: structs.ServiceConnect{ + Native: true, + }, + }, + }) + require.NoError(t, err) + + args := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "api", + } + var out string + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.VirtualIPForService", &args, &out)) + require.Equal(t, "240.0.0.1", out) +} + +func TestCatalog_VirtualIPForService_ACLDeny(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + c.Build = "1.11.0" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + err := s1.fsm.State().EnsureRegistration(1, &structs.RegisterRequest{ + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "api", + Connect: structs.ServiceConnect{ + Native: true, + }, + }, + }) + require.NoError(t, err) + + // Call the endpoint with no token and expect permission denied. 
+ args := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "api", + } + var out string + err = msgpackrpc.CallWithCodec(codec, "Catalog.VirtualIPForService", &args, &out) + require.Contains(t, err.Error(), acl.ErrPermissionDenied.Error()) + require.Equal(t, "", out) + + id := createToken(t, codec, ` + service "api" { + policy = "read" + }`) + + // Now try with the token and it will go through. + args.Token = id + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.VirtualIPForService", &args, &out)) + require.Equal(t, "240.0.0.1", out) + + // Make sure we still get permission denied for an unknown service. + args.ServiceName = "nope" + var out2 string + err = msgpackrpc.CallWithCodec(codec, "Catalog.VirtualIPForService", &args, &out2) + require.Contains(t, err.Error(), acl.ErrPermissionDenied.Error()) + require.Equal(t, "", out2) +} diff --git a/agent/consul/client.go b/agent/consul/client.go index 031308e19..ac64d704a 100644 --- a/agent/consul/client.go +++ b/agent/consul/client.go @@ -193,7 +193,13 @@ func (c *Client) Leave() error { // JoinLAN is used to have Consul join the inner-DC pool The target address // should be another node inside the DC listening on the Serf LAN address func (c *Client) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) { - // TODO(partitions): assert that the partitions match + // Partitions definitely have to match. + if c.config.AgentEnterpriseMeta().PartitionOrDefault() != entMeta.PartitionOrDefault() { + return 0, fmt.Errorf("target partition %q must match client agent partition %q", + entMeta.PartitionOrDefault(), + c.config.AgentEnterpriseMeta().PartitionOrDefault(), + ) + } return c.serf.Join(addrs, true) } @@ -221,7 +227,10 @@ func (c *Client) LANMembers(filter LANMemberFilter) ([]serf.Member, error) { return nil, err } - // TODO(partitions): assert that the partitions match + // Partitions definitely have to match. 
+ if c.config.AgentEnterpriseMeta().PartitionOrDefault() != filter.PartitionOrDefault() { + return nil, fmt.Errorf("partition %q not found", filter.PartitionOrDefault()) + } if !filter.AllSegments && filter.Segment != c.config.Segment { return nil, fmt.Errorf("segment %q not found", filter.Segment) @@ -232,7 +241,14 @@ func (c *Client) LANMembers(filter LANMemberFilter) ([]serf.Member, error) { // RemoveFailedNode is used to remove a failed node from the cluster. func (c *Client) RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error { - // TODO(partitions): assert that the partitions match + // Partitions definitely have to match. + if c.config.AgentEnterpriseMeta().PartitionOrDefault() != entMeta.PartitionOrDefault() { + return fmt.Errorf("client agent in partition %q cannot remove node in different partition %q", + c.config.AgentEnterpriseMeta().PartitionOrDefault(), entMeta.PartitionOrDefault()) + } + if !isSerfMember(c.serf, node) { + return fmt.Errorf("agent: No node found with name '%s'", node) + } if prune { return c.serf.RemoveFailedNodePrune(node) } @@ -371,9 +387,21 @@ func (c *Client) Stats() map[string]map[string]string { return stats } -// GetLANCoordinate returns the coordinate of the node in the LAN gossip pool. +// GetLANCoordinate returns the coordinate of the node in the LAN gossip +// pool. +// +// - Clients return a single coordinate for the single gossip pool they are +// in (default, segment, or partition). +// +// - Servers return one coordinate for their canonical gossip pool (i.e. +// default partition/segment) and one per segment they are also ancillary +// members of. +// +// NOTE: servers do not emit coordinates for partitioned gossip pools they +// are ancillary members of. +// +// NOTE: This assumes coordinates are enabled, so check that before calling. 
func (c *Client) GetLANCoordinate() (lib.CoordinateSet, error) { - // TODO(partitions): possibly something here lan, err := c.serf.GetCoordinate() if err != nil { return nil, err @@ -389,3 +417,11 @@ func (c *Client) ReloadConfig(config ReloadableConfig) error { c.rpcLimiter.Store(rate.NewLimiter(config.RPCRateLimit, config.RPCMaxBurst)) return nil } + +func (c *Client) AgentEnterpriseMeta() *structs.EnterpriseMeta { + return c.config.AgentEnterpriseMeta() +} + +func (c *Client) agentSegmentName() string { + return c.config.Segment +} diff --git a/agent/consul/client_serf.go b/agent/consul/client_serf.go index 3b632f6fd..55df7a547 100644 --- a/agent/consul/client_serf.go +++ b/agent/consul/client_serf.go @@ -49,11 +49,12 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) ( conf.ProtocolVersion = protocolVersionMap[c.config.ProtocolVersion] conf.RejoinAfterLeave = c.config.RejoinAfterLeave conf.Merge = &lanMergeDelegate{ - dc: c.config.Datacenter, - nodeID: c.config.NodeID, - nodeName: c.config.NodeName, - segment: c.config.Segment, - server: false, + dc: c.config.Datacenter, + nodeID: c.config.NodeID, + nodeName: c.config.NodeName, + segment: c.config.Segment, + server: false, + partition: c.config.AgentEnterpriseMeta().PartitionOrDefault(), } conf.SnapshotPath = filepath.Join(c.config.DataDir, path) @@ -65,6 +66,8 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) ( conf.ReconnectTimeoutOverride = libserf.NewReconnectOverride(c.logger) + enterpriseModifyClientSerfConfigLAN(c.config, conf) + return serf.Create(conf) } diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index d6ae206bc..79c3f1a7b 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -33,11 +33,7 @@ func testClientConfig(t *testing.T) (string, *Config) { dir := testutil.TempDir(t, "consul") config := DefaultConfig() - ports := freeport.MustTake(2) - t.Cleanup(func() { - freeport.Return(ports) - }) 
- + ports := freeport.GetN(t, 2) config.Datacenter = "dc1" config.DataDir = dir config.NodeName = uniqueNodeName(t.Name()) diff --git a/agent/consul/config.go b/agent/consul/config.go index 86c87f5d7..4b017da6b 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -180,10 +180,10 @@ type Config struct { // ACLEnabled is used to enable ACLs ACLsEnabled bool - // ACLMasterToken is used to bootstrap the ACL system. It should be specified + // ACLInitialManagementToken is used to bootstrap the ACL system. It should be specified // on the servers in the PrimaryDatacenter. When the leader comes online, it ensures - // that the Master token is available. This provides the initial token. - ACLMasterToken string + // that the initial management token is available. This provides the initial token. + ACLInitialManagementToken string // ACLTokenReplication is used to enabled token replication. // @@ -391,6 +391,8 @@ type Config struct { RPCConfig RPCConfig + RaftBoltDBConfig RaftBoltDBConfig + // Embedded Consul Enterprise specific configuration *EnterpriseConfig } @@ -603,3 +605,7 @@ type ReloadableConfig struct { RaftSnapshotInterval time.Duration RaftTrailingLogs int } + +type RaftBoltDBConfig struct { + NoFreelistSync bool +} diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index 8ae0dba3b..0fb5a6ef8 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -54,6 +54,11 @@ func (c *ConfigEntry) Apply(args *structs.ConfigEntryRequest, reply *bool) error return err } + err := gateWriteToSecondary(args.Datacenter, c.srv.config.Datacenter, c.srv.config.PrimaryDatacenter, args.Entry.GetKind()) + if err != nil { + return err + } + // Ensure that all config entry writes go to the primary datacenter. These will then // be replicated to all the other datacenters. 
args.Datacenter = c.srv.config.PrimaryDatacenter @@ -181,6 +186,7 @@ func (c *ConfigEntry) List(args *structs.ConfigEntryQuery, reply *structs.Indexe filteredEntries := make([]structs.ConfigEntry, 0, len(entries)) for _, entry := range entries { if !entry.CanRead(authz) { + reply.QueryMeta.ResultsFilteredByACLs = true continue } filteredEntries = append(filteredEntries, entry) @@ -241,6 +247,7 @@ func (c *ConfigEntry) ListAll(args *structs.ConfigEntryListAllRequest, reply *st filteredEntries := make([]structs.ConfigEntry, 0, len(entries)) for _, entry := range entries { if !entry.CanRead(authz) { + reply.QueryMeta.ResultsFilteredByACLs = true continue } // Doing this filter outside of memdb isn't terribly @@ -586,6 +593,33 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r }) } +func gateWriteToSecondary(targetDC, localDC, primaryDC, kind string) error { + // ExportedServices entries are gated from interactions from secondary DCs + // because non-default partitions cannot be created in secondaries + // and services cannot be exported to another datacenter. + if kind != structs.ExportedServices { + return nil + } + if localDC == "" { + // This should not happen because the datacenter is defaulted in DefaultConfig. + return fmt.Errorf("unknown local datacenter") + } + + if primaryDC == "" { + primaryDC = localDC + } + + switch { + case targetDC == "" && localDC != primaryDC: + return fmt.Errorf("exported-services writes in secondary datacenters must target the primary datacenter explicitly") + + case targetDC != "" && targetDC != primaryDC: + return fmt.Errorf("exported-services writes must not target secondary datacenters") + + } + return nil +} + // preflightCheck is meant to have kind-specific system validation outside of // content validation. The initial use case is restricting the ability to do // writes of service-intentions until the system is finished migration.
diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index 59fa2cf17..1187120ac 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -1,6 +1,7 @@ package consul import ( + "fmt" "os" "sort" "testing" @@ -154,7 +155,7 @@ func TestConfigEntry_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -270,7 +271,7 @@ func TestConfigEntry_Get_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -470,7 +471,7 @@ func TestConfigEntry_List_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -517,6 +518,7 @@ operator = "read" require.True(ok) require.Equal("foo", serviceConf.Name) require.Equal(structs.ServiceDefaults, serviceConf.Kind) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") // Get the global proxy config. 
args.Kind = structs.ProxyDefaults @@ -528,6 +530,7 @@ operator = "read" require.True(ok) require.Equal(structs.ProxyConfigGlobal, proxyConf.Name) require.Equal(structs.ProxyDefaults, proxyConf.Kind) + require.False(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") } func TestConfigEntry_ListAll_ACLDeny(t *testing.T) { @@ -542,7 +545,7 @@ func TestConfigEntry_ListAll_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -600,6 +603,7 @@ operator = "read" require.Equal(structs.ServiceDefaults, svcConf.Kind) require.Equal(structs.ProxyConfigGlobal, proxyConf.Name) require.Equal(structs.ProxyDefaults, proxyConf.Kind) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") } func TestConfigEntry_Delete(t *testing.T) { @@ -746,7 +750,7 @@ func TestConfigEntry_Delete_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1955,7 +1959,7 @@ func TestConfigEntry_ResolveServiceConfig_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2058,3 +2062,145 @@ func runStep(t *testing.T, name string, fn func(t *testing.T)) { t.FailNow() } } + +func Test_gateWriteToSecondary(t *testing.T) { + type args struct { + targetDC string + localDC string + primaryDC string + kind string + } + type testCase struct { + name string + args args + wantErr string + } + + run := func(t 
*testing.T, tc testCase) { + err := gateWriteToSecondary(tc.args.targetDC, tc.args.localDC, tc.args.primaryDC, tc.args.kind) + if tc.wantErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErr) + return + } + require.NoError(t, err) + } + + tt := []testCase{ + { + name: "primary to primary with implicit primary and target", + args: args{ + targetDC: "", + localDC: "dc1", + primaryDC: "", + kind: structs.ExportedServices, + }, + }, + { + name: "primary to primary with explicit primary and implicit target", + args: args{ + targetDC: "", + localDC: "dc1", + primaryDC: "dc1", + kind: structs.ExportedServices, + }, + }, + { + name: "primary to primary with all filled in", + args: args{ + targetDC: "dc1", + localDC: "dc1", + primaryDC: "dc1", + kind: structs.ExportedServices, + }, + }, + { + name: "primary to secondary with implicit primary and target", + args: args{ + targetDC: "dc2", + localDC: "dc1", + primaryDC: "", + kind: structs.ExportedServices, + }, + wantErr: "writes must not target secondary datacenters", + }, + { + name: "primary to secondary with all filled in", + args: args{ + targetDC: "dc2", + localDC: "dc1", + primaryDC: "dc1", + kind: structs.ExportedServices, + }, + wantErr: "writes must not target secondary datacenters", + }, + { + name: "secondary to secondary with all filled in", + args: args{ + targetDC: "dc2", + localDC: "dc2", + primaryDC: "dc1", + kind: structs.ExportedServices, + }, + wantErr: "writes must not target secondary datacenters", + }, + { + name: "implicit write to secondary", + args: args{ + targetDC: "", + localDC: "dc2", + primaryDC: "dc1", + kind: structs.ExportedServices, + }, + wantErr: "must target the primary datacenter explicitly", + }, + { + name: "empty local DC", + args: args{ + localDC: "", + kind: structs.ExportedServices, + }, + wantErr: "unknown local datacenter", + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func 
Test_gateWriteToSecondary_AllowedKinds(t *testing.T) { + type args struct { + targetDC string + localDC string + primaryDC string + kind string + } + + for _, kind := range structs.AllConfigEntryKinds { + if kind == structs.ExportedServices { + continue + } + + t.Run(fmt.Sprintf("%s-secondary-to-secondary", kind), func(t *testing.T) { + tcase := args{ + targetDC: "", + localDC: "dc2", + primaryDC: "dc1", + kind: kind, + } + require.NoError(t, gateWriteToSecondary(tcase.targetDC, tcase.localDC, tcase.primaryDC, tcase.kind)) + }) + + t.Run(fmt.Sprintf("%s-primary-to-secondary", kind), func(t *testing.T) { + tcase := args{ + targetDC: "dc2", + localDC: "dc1", + primaryDC: "dc1", + kind: kind, + } + require.NoError(t, gateWriteToSecondary(tcase.targetDC, tcase.localDC, tcase.primaryDC, tcase.kind)) + }) + } +} diff --git a/agent/consul/config_oss.go b/agent/consul/config_oss.go index a07a2813b..63d3cb2a3 100644 --- a/agent/consul/config_oss.go +++ b/agent/consul/config_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/config_replication.go b/agent/consul/config_replication.go index 536f75dc7..243cd8bb3 100644 --- a/agent/consul/config_replication.go +++ b/agent/consul/config_replication.go @@ -92,6 +92,10 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con defer ticker.Stop() for i, entry := range configs { + // Exported services only apply to the primary datacenter. 
+ if entry.GetKind() == structs.ExportedServices { + continue + } req := structs.ConfigEntryRequest{ Op: op, Datacenter: s.config.Datacenter, diff --git a/agent/consul/config_replication_test.go b/agent/consul/config_replication_test.go index fa10d4efe..5231d43a4 100644 --- a/agent/consul/config_replication_test.go +++ b/agent/consul/config_replication_test.go @@ -6,10 +6,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" - "github.com/stretchr/testify/require" ) func TestReplication_ConfigSort(t *testing.T) { diff --git a/agent/consul/config_test.go b/agent/consul/config_test.go index 774889534..c536684c0 100644 --- a/agent/consul/config_test.go +++ b/agent/consul/config_test.go @@ -19,26 +19,28 @@ func TestCloneSerfLANConfig(t *testing.T) { "Alive", "AwarenessMaxMultiplier", "Conflict", - "DNSConfigPath", "Delegate", "DelegateProtocolMax", "DelegateProtocolMin", "DelegateProtocolVersion", "DisableTcpPings", "DisableTcpPingsForNode", + "DNSConfigPath", "EnableCompression", "Events", "GossipToTheDeadTime", "HandoffQueueDepth", "IndirectChecks", - "LogOutput", + "Label", "Logger", + "LogOutput", "Merge", "Name", "Ping", "ProtocolVersion", "PushPullInterval", "RequireNodeNames", + "SkipInboundLabelCheck", "SuspicionMaxTimeoutMult", "TCPTimeout", "Transport", diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index c257c9fc6..d5e52f9e3 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -163,7 +163,7 @@ func TestConnectCAConfig_GetSet_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer 
os.RemoveAll(dir1) @@ -1108,7 +1108,7 @@ func TestConnectCASignValidation(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/coordinate_endpoint.go b/agent/consul/coordinate_endpoint.go index 42819479a..b35d8b260 100644 --- a/agent/consul/coordinate_endpoint.go +++ b/agent/consul/coordinate_endpoint.go @@ -24,8 +24,9 @@ type Coordinate struct { logger hclog.Logger // updates holds pending coordinate updates for the given nodes. This is - // keyed by node:segment so we can get a coordinate for each segment for - // servers, and we only track the latest update per node:segment. + // keyed by partition/node:segment so we can get a coordinate for each + // segment for servers, and we only track the latest update per + // partition/node:segment. updates map[string]*structs.CoordinateUpdateRequest // updatesLock synchronizes access to the updates map. @@ -132,7 +133,7 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct // Since this is a coordinate coming from some place else we harden this // and look for dimensionality problems proactively. - coord, err := c.srv.serfLAN.GetCoordinate() + coord, err := c.srv.GetMatchingLANCoordinate(args.PartitionOrDefault(), args.Segment) if err != nil { return err } @@ -141,7 +142,8 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct } // Fetch the ACL token, if any, and enforce the node policy if enabled. 
- authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil) + var authzContext acl.AuthorizerContext + authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext) if err != nil { return err } @@ -150,14 +152,12 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct return err } - var authzContext acl.AuthorizerContext - args.FillAuthzContext(&authzContext) if authz.NodeWrite(args.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } // Add the coordinate to the map of pending updates. - key := fmt.Sprintf("%s:%s", args.Node, args.Segment) + key := fmt.Sprintf("%s/%s:%s", args.PartitionOrDefault(), args.Node, args.Segment) c.updatesLock.Lock() c.updates[key] = args c.updatesLock.Unlock() @@ -174,8 +174,6 @@ func (c *Coordinate) ListDatacenters(args *struct{}, reply *[]structs.Datacenter return err } - // TODO(partitions): should we filter any of this out? - var out []structs.DatacenterMap // Strip the datacenter suffixes from all the node names. @@ -237,8 +235,8 @@ func (c *Coordinate) Node(args *structs.NodeSpecificRequest, reply *structs.Inde } // Fetch the ACL token, if any, and enforce the node policy if enabled. - - authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil) + var authzContext acl.AuthorizerContext + authz, err := c.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext) if err != nil { return err } @@ -247,14 +245,10 @@ func (c *Coordinate) Node(args *structs.NodeSpecificRequest, reply *structs.Inde return err } - var authzContext acl.AuthorizerContext - args.FillAuthzContext(&authzContext) if authz.NodeRead(args.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } - // TODO(partitions): do we have to add EnterpriseMeta to the reply like in Catalog.ListServices? 
- return c.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { diff --git a/agent/consul/coordinate_endpoint_test.go b/agent/consul/coordinate_endpoint_test.go index f40ee102d..c75e05d73 100644 --- a/agent/consul/coordinate_endpoint_test.go +++ b/agent/consul/coordinate_endpoint_test.go @@ -191,7 +191,7 @@ func TestCoordinate_Update_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -349,7 +349,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -452,6 +452,9 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { if len(resp.Coordinates) != 1 || resp.Coordinates[0].Node != "foo" { t.Fatalf("bad: %#v", resp.Coordinates) } + if !resp.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } } func TestCoordinate_Node(t *testing.T) { @@ -521,7 +524,7 @@ func TestCoordinate_Node_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 275aff03a..d1ecb2cbe 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -26,7 +26,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" 
c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index 99f4c357d..12de7f45d 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -185,7 +185,7 @@ type customizationMarkers struct { // the String() method on the type itself. It is this way to be more // consistent with other string ids within the discovery chain. func serviceIDString(sid structs.ServiceID) string { - return fmt.Sprintf("%s.%s", sid.ID, sid.NamespaceOrDefault()) + return fmt.Sprintf("%s.%s.%s", sid.ID, sid.NamespaceOrDefault(), sid.PartitionOrDefault()) } func (m *customizationMarkers) IsZero() bool { @@ -213,10 +213,10 @@ func (c *compiler) recordServiceProtocol(sid structs.ServiceID) error { if serviceDefault := c.entries.GetService(sid); serviceDefault != nil { return c.recordProtocol(sid, serviceDefault.Protocol) } - if c.entries.GlobalProxy != nil { + if proxyDefault := c.entries.GetProxyDefaults(sid.PartitionOrDefault()); proxyDefault != nil { var cfg proxyConfig // Ignore errors and fallback on defaults if it does happen. - _ = mapstructure.WeakDecode(c.entries.GlobalProxy.Config, &cfg) + _ = mapstructure.WeakDecode(proxyDefault.Config, &cfg) if cfg.Protocol != "" { return c.recordProtocol(sid, cfg.Protocol) } @@ -567,11 +567,12 @@ func (c *compiler) assembleChain() error { dest = &structs.ServiceRouteDestination{ Service: c.serviceName, Namespace: router.NamespaceOrDefault(), + Partition: router.PartitionOrDefault(), } } svc := defaultIfEmpty(dest.Service, c.serviceName) destNamespace := defaultIfEmpty(dest.Namespace, router.NamespaceOrDefault()) - destPartition := router.PartitionOrDefault() + destPartition := defaultIfEmpty(dest.Partition, router.PartitionOrDefault()) // Check to see if the destination is eligible for splitting. 
var ( @@ -602,7 +603,7 @@ func (c *compiler) assembleChain() error { } defaultRoute := &structs.DiscoveryRoute{ - Definition: newDefaultServiceRoute(router.Name, router.NamespaceOrDefault()), + Definition: newDefaultServiceRoute(router.Name, router.NamespaceOrDefault(), router.PartitionOrDefault()), NextNode: defaultDestinationNode.MapKey(), } routeNode.Routes = append(routeNode.Routes, defaultRoute) @@ -613,7 +614,7 @@ func (c *compiler) assembleChain() error { return nil } -func newDefaultServiceRoute(serviceName string, namespace string) *structs.ServiceRoute { +func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.ServiceRoute { return &structs.ServiceRoute{ Match: &structs.ServiceRouteMatch{ HTTP: &structs.ServiceRouteHTTPMatch{ @@ -623,6 +624,7 @@ func newDefaultServiceRoute(serviceName string, namespace string) *structs.Servi Destination: &structs.ServiceRouteDestination{ Service: serviceName, Namespace: namespace, + Partition: partition, }, } } @@ -836,7 +838,7 @@ RESOLVE_AGAIN: target, redirect.Service, redirect.ServiceSubset, - target.Partition, + redirect.Partition, redirect.Namespace, redirect.Datacenter, ) @@ -940,9 +942,9 @@ RESOLVE_AGAIN: if serviceDefault := c.entries.GetService(targetID); serviceDefault != nil { target.MeshGateway = serviceDefault.MeshGateway } - - if c.entries.GlobalProxy != nil && target.MeshGateway.Mode == structs.MeshGatewayModeDefault { - target.MeshGateway.Mode = c.entries.GlobalProxy.MeshGateway.Mode + proxyDefault := c.entries.GetProxyDefaults(targetID.PartitionOrDefault()) + if proxyDefault != nil && target.MeshGateway.Mode == structs.MeshGatewayModeDefault { + target.MeshGateway.Mode = proxyDefault.MeshGateway.Mode } if c.overrideMeshGateway.Mode != structs.MeshGatewayModeDefault { diff --git a/agent/consul/discoverychain/compile_oss.go b/agent/consul/discoverychain/compile_oss.go index 51949daf2..4aa43eb27 100644 --- a/agent/consul/discoverychain/compile_oss.go +++ 
b/agent/consul/discoverychain/compile_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package discoverychain diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 360b40e48..1cd575815 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -158,14 +158,14 @@ func testcase_JustRouterWithDefaults() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -210,11 +210,11 @@ func testcase_JustRouterWithNoDestination() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { Definition: &structs.ServiceRoute{ @@ -227,7 +227,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase { NextNode: "resolver:main.default.default.dc1", }, { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -270,14 +270,14 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: 
"router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -321,21 +321,21 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), - NextNode: "splitter:main.default", + Definition: newDefaultServiceRoute("main", "default", "default"), + NextNode: "splitter:main.default.default", }, }, }, - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -386,21 +386,21 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), - NextNode: 
"splitter:main.default", + Definition: newDefaultServiceRoute("main", "default", "default"), + NextNode: "splitter:main.default.default", }, }, }, - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -458,21 +458,21 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), - NextNode: "splitter:main.default", + Definition: newDefaultServiceRoute("main", "default", "default"), + NextNode: "splitter:main.default.default", }, }, }, - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -542,18 +542,18 @@ func testcase_RouteBypassesSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { Definition: &router.Routes[0], NextNode: "resolver:bypass.other.default.default.dc1", }, { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", 
"default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -605,11 +605,11 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -661,11 +661,11 @@ func testcase_NoopSplit_WithResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -724,11 +724,11 @@ func testcase_SubsetSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -801,11 +801,11 @@ func testcase_ServiceSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: 
"main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -898,11 +898,11 @@ func testcase_SplitBypassesSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -1053,13 +1053,14 @@ func testcase_DatacenterRedirect() compileTestCase { func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase { entries := newEntries() - entries.GlobalProxy = &structs.ProxyConfigEntry{ + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, - } + }) + entries.AddResolvers( &structs.ServiceResolverConfigEntry{ Kind: "service-resolver", @@ -1300,13 +1301,15 @@ func testcase_DatacenterFailover() compileTestCase { func testcase_DatacenterFailover_WithMeshGateways() compileTestCase { entries := newEntries() - entries.GlobalProxy = &structs.ProxyConfigEntry{ + + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, - } + }) + entries.AddResolvers( &structs.ServiceResolverConfigEntry{ Kind: "service-resolver", @@ -1384,11 +1387,11 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: 
structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -1446,7 +1449,8 @@ func testcase_DefaultResolver() compileTestCase { func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { entries := newEntries() - entries.GlobalProxy = &structs.ProxyConfigEntry{ + + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, Config: map[string]interface{}{ @@ -1455,7 +1459,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, - } + }) expect := &structs.CompiledDiscoveryChain{ Protocol: "grpc", @@ -1699,11 +1703,11 @@ func testcase_MultiDatacenterCanary() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -1880,11 +1884,11 @@ func testcase_AllBellsAndWhistles() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { Definition: &router.Routes[0], @@ -1892,17 +1896,17 @@ func testcase_AllBellsAndWhistles() compileTestCase { }, { Definition: &router.Routes[1], - NextNode: "splitter:svc-split.default", + NextNode: "splitter:svc-split.default.default", }, { - 
Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:default-subset.main.default.default.dc1", }, }, }, - "splitter:svc-split.default": { + "splitter:svc-split.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "svc-split.default", + Name: "svc-split.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -2455,11 +2459,11 @@ func testcase_LBSplitterAndResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -2642,13 +2646,13 @@ func newSimpleRoute(name string, muts ...func(*structs.ServiceRoute)) structs.Se } func setGlobalProxyProtocol(entries *structs.DiscoveryChainConfigEntries, protocol string) { - entries.GlobalProxy = &structs.ProxyConfigEntry{ + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, Config: map[string]interface{}{ "protocol": protocol, }, - } + }) } func setServiceProtocol(entries *structs.DiscoveryChainConfigEntries, name, protocol string) { diff --git a/agent/consul/enterprise_client_oss.go b/agent/consul/enterprise_client_oss.go index bedbe8734..3dcf34732 100644 --- a/agent/consul/enterprise_client_oss.go +++ b/agent/consul/enterprise_client_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul @@ -12,6 +13,10 @@ func (c *Client) initEnterprise(_ Deps) error { return nil } +func enterpriseModifyClientSerfConfigLAN(_ *Config, _ *serf.Config) { + // nothing +} + func (c *Client) startEnterprise() error { 
return nil } @@ -20,10 +25,6 @@ func (c *Client) handleEnterpriseUserEvents(event serf.UserEvent) bool { return false } -func (_ *Client) addEnterpriseSerfTags(_ map[string]string) { - // do nothing -} - func (c *Client) enterpriseStats() map[string]map[string]string { return nil } diff --git a/agent/consul/enterprise_config_oss.go b/agent/consul/enterprise_config_oss.go index 8cd1e7124..a58998406 100644 --- a/agent/consul/enterprise_config_oss.go +++ b/agent/consul/enterprise_config_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/enterprise_server_oss.go b/agent/consul/enterprise_server_oss.go index f729fd810..cad141c11 100644 --- a/agent/consul/enterprise_server_oss.go +++ b/agent/consul/enterprise_server_oss.go @@ -84,6 +84,31 @@ func (s *Server) validateEnterpriseIntentionNamespace(ns string, _ bool) error { return errors.New("Namespaces is a Consul Enterprise feature") } +// setupSerfLAN is used to setup and initialize a Serf for the LAN +func (s *Server) setupSerfLAN(config *Config) error { + var err error + // Initialize the LAN Serf for the default network segment. 
+ s.serfLAN, _, err = s.setupSerf(setupSerfOptions{ + Config: config.SerfLANConfig, + EventCh: s.eventChLAN, + SnapshotPath: serfLANSnapshot, + Listener: s.Listener, + WAN: false, + Segment: "", + Partition: "", + }) + if err != nil { + return err + } + return nil +} + +func (s *Server) shutdownSerfLAN() { + if s.serfLAN != nil { + s.serfLAN.Shutdown() + } +} + func addEnterpriseSerfTags(_ map[string]string, _ *structs.EnterpriseMeta) { // do nothing } diff --git a/agent/consul/enterprise_server_oss_test.go b/agent/consul/enterprise_server_oss_test.go index a92e12684..46a73b6d1 100644 --- a/agent/consul/enterprise_server_oss_test.go +++ b/agent/consul/enterprise_server_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/federation_state_endpoint_test.go b/agent/consul/federation_state_endpoint_test.go index 9bee48f6a..a8544869c 100644 --- a/agent/consul/federation_state_endpoint_test.go +++ b/agent/consul/federation_state_endpoint_test.go @@ -116,7 +116,7 @@ func TestFederationState_Apply_Upsert_ACLDeny(t *testing.T) { c.DisableFederationStateAntiEntropy = true c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -237,7 +237,7 @@ func TestFederationState_Get_ACLDeny(t *testing.T) { c.DisableFederationStateAntiEntropy = true c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -409,7 +409,7 @@ func TestFederationState_List_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -425,7 +425,7 @@ func TestFederationState_List_ACLDeny(t 
*testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir2) @@ -502,9 +502,10 @@ func TestFederationState_List_ACLDeny(t *testing.T) { type tcase struct { token string - listDenied bool - listEmpty bool - gwListEmpty bool + listDenied bool + listEmpty bool + gwListEmpty bool + gwFilteredByACLs bool } cases := map[string]tcase{ @@ -514,27 +515,31 @@ func TestFederationState_List_ACLDeny(t *testing.T) { gwListEmpty: true, }, "no perms": { - token: nadaToken.SecretID, - listDenied: true, - gwListEmpty: true, + token: nadaToken.SecretID, + listDenied: true, + gwListEmpty: true, + gwFilteredByACLs: true, }, "service:read": { - token: svcReadToken.SecretID, - listDenied: true, - gwListEmpty: true, + token: svcReadToken.SecretID, + listDenied: true, + gwListEmpty: true, + gwFilteredByACLs: true, }, "node:read": { - token: nodeReadToken.SecretID, - listDenied: true, - gwListEmpty: true, + token: nodeReadToken.SecretID, + listDenied: true, + gwListEmpty: true, + gwFilteredByACLs: true, }, "service:read and node:read": { token: svcAndNodeReadToken.SecretID, listDenied: true, }, "operator:read": { - token: opReadToken.SecretID, - gwListEmpty: true, + token: opReadToken.SecretID, + gwListEmpty: true, + gwFilteredByACLs: true, }, "master token": { token: "root", @@ -585,6 +590,11 @@ func TestFederationState_List_ACLDeny(t *testing.T) { require.NoError(t, err) require.Equal(t, expectedMeshGateways.DatacenterNodes, out.DatacenterNodes) } + require.Equal(t, + tc.gwFilteredByACLs, + out.QueryMeta.ResultsFilteredByACLs, + "ResultsFilteredByACLs should be %v", tc.gwFilteredByACLs, + ) }) }) } @@ -685,7 +695,7 @@ func TestFederationState_Apply_Delete_ACLDeny(t *testing.T) { c.DisableFederationStateAntiEntropy = true c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + 
c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/fsm/snapshot_oss.go b/agent/consul/fsm/snapshot_oss.go index 81c26d3b7..7ebec8a33 100644 --- a/agent/consul/fsm/snapshot_oss.go +++ b/agent/consul/fsm/snapshot_oss.go @@ -33,9 +33,14 @@ func init() { registerRestorer(structs.ACLAuthMethodSetRequestType, restoreAuthMethod) registerRestorer(structs.FederationStateRequestType, restoreFederationState) registerRestorer(structs.SystemMetadataRequestType, restoreSystemMetadata) + registerRestorer(structs.ServiceVirtualIPRequestType, restoreServiceVirtualIP) + registerRestorer(structs.FreeVirtualIPRequestType, restoreFreeVirtualIP) } func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error { + if err := s.persistVirtualIPs(sink, encoder); err != nil { + return err + } if err := s.persistNodes(sink, encoder); err != nil { return err } @@ -510,6 +515,38 @@ func (s *snapshot) persistIndex(sink raft.SnapshotSink, encoder *codec.Encoder) return nil } +func (s *snapshot) persistVirtualIPs(sink raft.SnapshotSink, encoder *codec.Encoder) error { + serviceVIPs, err := s.state.ServiceVirtualIPs() + if err != nil { + return err + } + + for entry := serviceVIPs.Next(); entry != nil; entry = serviceVIPs.Next() { + if _, err := sink.Write([]byte{byte(structs.ServiceVirtualIPRequestType)}); err != nil { + return err + } + if err := encoder.Encode(entry.(state.ServiceVirtualIP)); err != nil { + return err + } + } + + freeVIPs, err := s.state.FreeVirtualIPs() + if err != nil { + return err + } + + for entry := freeVIPs.Next(); entry != nil; entry = freeVIPs.Next() { + if _, err := sink.Write([]byte{byte(structs.FreeVirtualIPRequestType)}); err != nil { + return err + } + if err := encoder.Encode(entry.(state.FreeVirtualIP)); err != nil { + return err + } + } + + return nil +} + func restoreRegistration(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) 
error { var req structs.RegisterRequest if err := decoder.Decode(&req); err != nil { @@ -790,3 +827,25 @@ func restoreSystemMetadata(header *SnapshotHeader, restore *state.Restore, decod } return restore.SystemMetadataEntry(&req) } + +func restoreServiceVirtualIP(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req state.ServiceVirtualIP + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.ServiceVirtualIP(req); err != nil { + return err + } + return nil +} + +func restoreFreeVirtualIP(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req state.FreeVirtualIP + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.FreeVirtualIP(req); err != nil { + return err + } + return nil +} diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 996cf2fd2..652706865 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -60,6 +60,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { Port: 80, Connect: connectConf, }) + fsm.state.EnsureService(4, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary"}, Address: "127.0.0.1", Port: 5000}) fsm.state.EnsureService(5, "baz", &structs.NodeService{ID: "web", Service: "web", Tags: nil, Address: "127.0.0.2", Port: 80}) fsm.state.EnsureService(6, "baz", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"secondary"}, Address: "127.0.0.2", Port: 5000}) @@ -434,6 +435,43 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { } require.NoError(t, fsm.state.EnsureConfigEntry(27, meshConfig)) + // Connect-native services for virtual IP generation + systemMetadataEntry = &structs.SystemMetadataEntry{ + Key: structs.SystemMetadataVirtualIPsEnabled, + Value: "true", + } + require.NoError(t, fsm.state.SystemMetadataSet(28, systemMetadataEntry)) + + fsm.state.EnsureService(29, "foo", &structs.NodeService{ + 
ID: "frontend", + Service: "frontend", + Address: "127.0.0.1", + Port: 8000, + Connect: connectConf, + }) + vip, err := fsm.state.VirtualIPForService(structs.NewServiceName("frontend", nil)) + require.NoError(t, err) + require.Equal(t, vip, "240.0.0.1") + + fsm.state.EnsureService(30, "foo", &structs.NodeService{ + ID: "backend", + Service: "backend", + Address: "127.0.0.1", + Port: 9000, + Connect: connectConf, + }) + vip, err = fsm.state.VirtualIPForService(structs.NewServiceName("backend", nil)) + require.NoError(t, err) + require.Equal(t, vip, "240.0.0.2") + + _, serviceNames, err := fsm.state.ServiceNamesOfKind(nil, structs.ServiceKindTypical) + require.NoError(t, err) + + expect := []string{"backend", "db", "frontend", "web"} + for i, sn := range serviceNames { + require.Equal(t, expect[i], sn.Service.Name) + } + // Snapshot snap, err := fsm.Snapshot() require.NoError(t, err) @@ -519,7 +557,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { _, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil) require.NoError(t, err) - require.Len(t, fooSrv.Services, 2) + require.Len(t, fooSrv.Services, 4) require.Contains(t, fooSrv.Services["db"].Tags, "primary") require.True(t, stringslice.Contains(fooSrv.Services["db"].Tags, "primary")) require.Equal(t, 5001, fooSrv.Services["db"].Port) @@ -538,6 +576,14 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, uint64(7), checks[0].CreateIndex) require.Equal(t, uint64(25), checks[0].ModifyIndex) + // Verify virtual IPs are consistent. 
+ vip, err = fsm2.state.VirtualIPForService(structs.NewServiceName("frontend", nil)) + require.NoError(t, err) + require.Equal(t, vip, "240.0.0.1") + vip, err = fsm2.state.VirtualIPForService(structs.NewServiceName("backend", nil)) + require.NoError(t, err) + require.Equal(t, vip, "240.0.0.2") + // Verify key is set _, d, err := fsm2.state.KVSGet(nil, "/test", nil) require.NoError(t, err) @@ -652,10 +698,10 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Len(t, roots, 2) // Verify provider state is restored. - _, state, err := fsm2.state.CAProviderState("asdf") + _, provider, err := fsm2.state.CAProviderState("asdf") require.NoError(t, err) - require.Equal(t, "foo", state.PrivateKey) - require.Equal(t, "bar", state.RootCert) + require.Equal(t, "foo", provider.PrivateKey) + require.Equal(t, "bar", provider.RootCert) // Verify CA configuration is restored. _, caConf, err := fsm2.state.CAConfig(nil) @@ -700,8 +746,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { // Verify system metadata is restored. 
_, systemMetadataLoaded, err := fsm2.state.SystemMetadataList(nil) require.NoError(t, err) - require.Len(t, systemMetadataLoaded, 1) - require.Equal(t, systemMetadataEntry, systemMetadataLoaded[0]) + require.Len(t, systemMetadataLoaded, 2) + require.Equal(t, systemMetadataEntry, systemMetadataLoaded[1]) // Verify service-intentions is restored _, serviceIxnEntry, err := fsm2.state.ConfigEntry(nil, structs.ServiceIntentions, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) @@ -713,6 +759,14 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.NoError(t, err) require.Equal(t, meshConfig, meshConfigEntry) + _, restoredServiceNames, err := fsm2.state.ServiceNamesOfKind(nil, structs.ServiceKindTypical) + require.NoError(t, err) + + expect = []string{"backend", "db", "frontend", "web"} + for i, sn := range restoredServiceNames { + require.Equal(t, expect[i], sn.Service.Name) + } + // Snapshot snap, err = fsm2.Snapshot() require.NoError(t, err) diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 5ef0e9e3b..2fe95e6db 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -55,9 +55,6 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, return err } reply.Index, reply.HealthChecks = index, checks - if err := h.srv.filterACL(args.Token, reply); err != nil { - return err - } raw, err := filter.Execute(reply.HealthChecks) if err != nil { @@ -65,6 +62,13 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, } reply.HealthChecks = raw.(structs.HealthChecks) + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. 
+ if err := h.srv.filterACL(args.Token, reply); err != nil { + return err + } + return h.srv.sortNodesByDistanceFrom(args.Source, reply.HealthChecks) }) } @@ -99,15 +103,20 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, return err } reply.Index, reply.HealthChecks = index, checks - if err := h.srv.filterACL(args.Token, reply); err != nil { - return err - } raw, err := filter.Execute(reply.HealthChecks) if err != nil { return err } reply.HealthChecks = raw.(structs.HealthChecks) + + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := h.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }) } @@ -156,9 +165,6 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, return err } reply.Index, reply.HealthChecks = index, checks - if err := h.srv.filterACL(args.Token, reply); err != nil { - return err - } raw, err := filter.Execute(reply.HealthChecks) if err != nil { @@ -166,6 +172,13 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, } reply.HealthChecks = raw.(structs.HealthChecks) + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. 
+ if err := h.srv.filterACL(args.Token, reply); err != nil { + return err + } + return h.srv.sortNodesByDistanceFrom(args.Source, reply.HealthChecks) }) } @@ -232,16 +245,19 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc reply.Nodes = nodeMetaFilter(args.NodeMetaFilters, reply.Nodes) } - if err := h.srv.filterACL(args.Token, reply); err != nil { - return err - } - raw, err := filter.Execute(reply.Nodes) if err != nil { return err } reply.Nodes = raw.(structs.CheckServiceNodes) + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := h.srv.filterACL(args.Token, reply); err != nil { + return err + } + return h.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes) }) diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index ac85c68d2..0a6100052 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -983,7 +983,7 @@ func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1289,7 +1289,7 @@ func TestHealth_ServiceNodes_Ingress_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1431,6 +1431,9 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) { } t.Parallel() + + require := require.New(t) + dir, token, srv, codec := testACLFilterServer(t) defer os.RemoveAll(dir) defer srv.Shutdown() @@ -1442,9 +1445,9 @@ 
func TestHealth_NodeChecks_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedHealthChecks{} - if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } + err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply) + require.NoError(err) + found := false for _, chk := range reply.HealthChecks { switch chk.ServiceName { @@ -1454,9 +1457,8 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) { t.Fatalf("bad: %#v", reply.HealthChecks) } } - if !found { - t.Fatalf("bad: %#v", reply.HealthChecks) - } + require.True(found, "bad: %#v", reply.HealthChecks) + require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. This also proves @@ -1471,6 +1473,9 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) { } t.Parallel() + + require := require.New(t) + dir, token, srv, codec := testACLFilterServer(t) defer os.RemoveAll(dir) defer srv.Shutdown() @@ -1482,9 +1487,9 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedHealthChecks{} - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply) + require.NoError(err) + found := false for _, chk := range reply.HealthChecks { if chk.ServiceName == "foo" { @@ -1492,18 +1497,14 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) { break } } - if !found { - t.Fatalf("bad: %#v", reply.HealthChecks) - } + require.True(found, "bad: %#v", reply.HealthChecks) opt.ServiceName = "bar" reply = structs.IndexedHealthChecks{} - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply); err != nil { - 
t.Fatalf("err: %s", err) - } - if len(reply.HealthChecks) != 0 { - t.Fatalf("bad: %#v", reply.HealthChecks) - } + err = msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply) + require.NoError(err) + require.Empty(reply.HealthChecks) + require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. This also proves @@ -1518,6 +1519,9 @@ func TestHealth_ServiceNodes_FilterACL(t *testing.T) { } t.Parallel() + + require := require.New(t) + dir, token, srv, codec := testACLFilterServer(t) defer os.RemoveAll(dir) defer srv.Shutdown() @@ -1529,21 +1533,16 @@ func TestHealth_ServiceNodes_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedCheckServiceNodes{} - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } - if len(reply.Nodes) != 1 { - t.Fatalf("bad: %#v", reply.Nodes) - } + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply) + require.NoError(err) + require.Len(reply.Nodes, 1) opt.ServiceName = "bar" reply = structs.IndexedCheckServiceNodes{} - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } - if len(reply.Nodes) != 0 { - t.Fatalf("bad: %#v", reply.Nodes) - } + err = msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply) + require.NoError(err) + require.Empty(reply.Nodes) + require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. 
This also proves @@ -1558,6 +1557,9 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) { } t.Parallel() + + require := require.New(t) + dir, token, srv, codec := testACLFilterServer(t) defer os.RemoveAll(dir) defer srv.Shutdown() @@ -1569,9 +1571,8 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedHealthChecks{} - if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } + err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &opt, &reply) + require.NoError(err) found := false for _, chk := range reply.HealthChecks { @@ -1582,9 +1583,8 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) { t.Fatalf("bad service 'bar': %#v", reply.HealthChecks) } } - if !found { - t.Fatalf("missing service 'foo': %#v", reply.HealthChecks) - } + require.True(found, "missing service 'foo': %#v", reply.HealthChecks) + require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. This also proves diff --git a/agent/consul/helper_test.go b/agent/consul/helper_test.go index 11309b1f7..c97133580 100644 --- a/agent/consul/helper_test.go +++ b/agent/consul/helper_test.go @@ -87,11 +87,19 @@ func wantRaft(servers []*Server) error { // joinAddrLAN returns the address other servers can // use to join the cluster on the LAN interface. 
-func joinAddrLAN(s *Server) string { +func joinAddrLAN(s clientOrServer) string { if s == nil { - panic("no server") + panic("no client or server") + } + var port int + switch x := s.(type) { + case *Server: + port = x.config.SerfLANConfig.MemberlistConfig.BindPort + case *Client: + port = x.config.SerfLANConfig.MemberlistConfig.BindPort + default: + panic(fmt.Sprintf("unhandled type %T", s)) } - port := s.config.SerfLANConfig.MemberlistConfig.BindPort return fmt.Sprintf("127.0.0.1:%d", port) } @@ -110,6 +118,8 @@ func joinAddrWAN(s *Server) string { type clientOrServer interface { JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) LANMembersInAgentPartition() []serf.Member + AgentEnterpriseMeta() *structs.EnterpriseMeta + agentSegmentName() string } // joinLAN is a convenience function for @@ -117,27 +127,54 @@ type clientOrServer interface { // member.JoinLAN("127.0.0.1:"+leader.config.SerfLANConfig.MemberlistConfig.BindPort) func joinLAN(t *testing.T, member clientOrServer, leader *Server) { t.Helper() + joinLANWithOptions(t, member, leader, true) +} +func joinLANWithNoMembershipChecks(t *testing.T, member clientOrServer, leader *Server) { + t.Helper() + joinLANWithOptions(t, member, leader, false) +} +func joinLANWithOptions(t *testing.T, member clientOrServer, leader *Server, doMembershipChecks bool) { + t.Helper() if member == nil || leader == nil { panic("no server") } - var memberAddr string - switch x := member.(type) { - case *Server: - memberAddr = joinAddrLAN(x) - case *Client: - memberAddr = fmt.Sprintf("127.0.0.1:%d", x.config.SerfLANConfig.MemberlistConfig.BindPort) - } + memberAddr := joinAddrLAN(member) + + var ( + memberEntMeta = member.AgentEnterpriseMeta() + memberPartition = memberEntMeta.PartitionOrDefault() + memberSegment = member.agentSegmentName() + ) + leaderAddr := joinAddrLAN(leader) - if _, err := member.JoinLAN([]string{leaderAddr}, nil); err != nil { + if memberSegment != "" { + leaderAddr = 
leader.LANSegmentAddr(memberSegment) + } + if _, err := member.JoinLAN([]string{leaderAddr}, memberEntMeta); err != nil { t.Fatal(err) } + + if !doMembershipChecks { + return + } + + f := LANMemberFilter{ + Partition: memberPartition, + Segment: memberSegment, + } retry.Run(t, func(r *retry.R) { - if !seeEachOther(leader.LANMembersInAgentPartition(), member.LANMembersInAgentPartition(), leaderAddr, memberAddr) { + leaderView, err := leader.LANMembers(f) + require.NoError(r, err) + + if !seeEachOther(leaderView, member.LANMembersInAgentPartition(), leaderAddr, memberAddr) { r.Fatalf("leader and member cannot see each other on LAN") } }) - if !seeEachOther(leader.LANMembersInAgentPartition(), member.LANMembersInAgentPartition(), leaderAddr, memberAddr) { + + leaderView, err := leader.LANMembers(f) + require.NoError(t, err) + if !seeEachOther(leaderView, member.LANMembersInAgentPartition(), leaderAddr, memberAddr) { t.Fatalf("leader and member cannot see each other on LAN") } } @@ -147,6 +184,14 @@ func joinLAN(t *testing.T, member clientOrServer, leader *Server) { // member.JoinWAN("127.0.0.1:"+leader.config.SerfWANConfig.MemberlistConfig.BindPort) func joinWAN(t *testing.T, member, leader *Server) { t.Helper() + joinWANWithOptions(t, member, leader, true) +} +func joinWANWithNoMembershipChecks(t *testing.T, member, leader *Server) { + t.Helper() + joinWANWithOptions(t, member, leader, false) +} +func joinWANWithOptions(t *testing.T, member, leader *Server, doMembershipChecks bool) { + t.Helper() if member == nil || leader == nil { panic("no server") @@ -155,6 +200,11 @@ func joinWAN(t *testing.T, member, leader *Server) { if _, err := member.JoinWAN([]string{leaderAddr}); err != nil { t.Fatal(err) } + + if !doMembershipChecks { + return + } + retry.Run(t, func(r *retry.R) { if !seeEachOther(leader.WANMembers(), member.WANMembers(), leaderAddr, memberAddr) { r.Fatalf("leader and member cannot see each other on WAN") diff --git a/agent/consul/intention_endpoint.go 
b/agent/consul/intention_endpoint.go index f5ffaaa3e..862637de5 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -550,17 +550,19 @@ func (s *Intention) List(args *structs.IntentionListRequest, reply *structs.Inde } else { reply.DataOrigin = structs.IntentionDataOriginLegacy } - - if err := s.srv.filterACL(args.Token, reply); err != nil { - return err - } - raw, err := filter.Execute(reply.Intentions) if err != nil { return err } reply.Intentions = raw.(structs.Intentions) + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := s.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }, ) @@ -605,10 +607,10 @@ func (s *Intention) Match(args *structs.IntentionQueryRequest, reply *structs.In } var authzContext acl.AuthorizerContext - // Go through each entry to ensure we have intention:read for the resource. + // Go through each entry to ensure we have intentions:read for the resource. // TODO - should we do this instead of filtering the result set? This will only allow - // queries for which the token has intention:read permissions on the requested side + // queries for which the token has intentions:read permissions on the requested side // of the service. Should it instead return all matches that it would be able to list. // if so we should remove this and call filterACL instead. Based on how this is used // its probably fine. 
If you have intention read on the source just do a source type diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 59f450ff9..ec3941348 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -867,7 +867,7 @@ func TestIntentionApply_aclDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1257,7 +1257,7 @@ func TestIntentionApply_aclDelete(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1323,7 +1323,7 @@ func TestIntentionApply_aclUpdate(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1377,7 +1377,7 @@ func TestIntentionApply_aclManagement(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1422,7 +1422,7 @@ func TestIntentionApply_aclUpdateChange(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1472,7 +1472,7 @@ func TestIntentionGet_acl(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { 
c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1635,6 +1635,7 @@ func TestIntentionList_acl(t *testing.T) { var resp structs.IndexedIntentions require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) require.Len(t, resp.Intentions, 0) + require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) // Test with management token @@ -1646,6 +1647,7 @@ func TestIntentionList_acl(t *testing.T) { var resp structs.IndexedIntentions require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) require.Len(t, resp.Intentions, 3) + require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) // Test with user token @@ -1657,6 +1659,7 @@ func TestIntentionList_acl(t *testing.T) { var resp structs.IndexedIntentions require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) require.Len(t, resp.Intentions, 1) + require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("filtered", func(t *testing.T) { @@ -1671,6 +1674,7 @@ func TestIntentionList_acl(t *testing.T) { var resp structs.IndexedIntentions require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) require.Len(t, resp.Intentions, 1) + require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) } @@ -1875,7 +1879,7 @@ func TestIntentionCheck_defaultACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1911,7 +1915,7 @@ func TestIntentionCheck_defaultACLAllow(t *testing.T) { dir1, s1 := testServerWithConfig(t, 
func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" }) defer os.RemoveAll(dir1) @@ -1947,7 +1951,7 @@ func TestIntentionCheck_aclDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index 03b8a5520..14fbc4be4 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -6,7 +6,6 @@ import ( bexpr "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/serf" "github.com/hashicorp/consul/acl" @@ -73,18 +72,21 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, if err != nil { return err } - reply.Index, reply.Dump = index, dump - if err := m.srv.filterACL(args.Token, reply); err != nil { - return err - } raw, err := filter.Execute(reply.Dump) if err != nil { return err } - reply.Dump = raw.(structs.NodeDump) + + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := m.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }) } @@ -115,10 +117,6 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. 
} reply.Nodes = nodes - if err := m.srv.filterACL(args.Token, &reply.Nodes); err != nil { - return err - } - // Get, store, and filter gateway services idx, gatewayServices, err := state.DumpGatewayServices(ws) if err != nil { @@ -131,16 +129,19 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. } reply.Index = maxIdx - if err := m.srv.filterACL(args.Token, &reply.Gateways); err != nil { - return err - } - raw, err := filter.Execute(reply.Nodes) if err != nil { return err } - reply.Nodes = raw.(structs.CheckServiceNodes) + + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. + if err := m.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }) } @@ -412,22 +413,13 @@ func (m *Internal) EventFire(args *structs.EventFireRequest, } // Set the query meta data - m.srv.setQueryMeta(&reply.QueryMeta) + m.srv.setQueryMeta(&reply.QueryMeta, args.Token) // Add the consul prefix to the event name eventName := userEventName(args.Name) // Fire the event on all LAN segments - segments := m.srv.LANSegments() - var errs error - for name, segment := range segments { - err := segment.UserEvent(eventName, args.Payload, false) - if err != nil { - err = fmt.Errorf("error broadcasting event to segment %q: %v", name, err) - errs = multierror.Append(errs, err) - } - } - return errs + return m.srv.LANSendUserEvent(eventName, args.Payload, false) } // KeyringOperation will query the WAN and LAN gossip keyrings of all nodes. 
@@ -492,14 +484,18 @@ func (m *Internal) KeyringOperation( func (m *Internal) executeKeyringOpLAN(args *structs.KeyringRequest) []*structs.KeyringResponse { responses := []*structs.KeyringResponse{} - segments := m.srv.LANSegments() - for name, segment := range segments { - mgr := segment.KeyManager() + _ = m.srv.DoWithLANSerfs(func(poolName, poolKind string, pool *serf.Serf) error { + mgr := pool.KeyManager() serfResp, err := m.executeKeyringOpMgr(mgr, args) resp := translateKeyResponseToKeyringResponse(serfResp, m.srv.config.Datacenter, err) - resp.Segment = name + if poolKind == PoolKindSegment { + resp.Segment = poolName + } else { + resp.Partition = poolName + } responses = append(responses, &resp) - } + return nil + }, nil) return responses } diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index d5a49525a..f9105304f 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -461,7 +461,7 @@ func TestInternal_NodeInfo_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedNodeDump{} - if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &opt, &reply); err != nil { t.Fatalf("err: %s", err) } for _, info := range reply.Dump { @@ -492,6 +492,10 @@ func TestInternal_NodeInfo_FilterACL(t *testing.T) { } } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. 
This also proves // that we respect the version 8 ACL flag, since the test server sets @@ -515,7 +519,7 @@ func TestInternal_NodeDump_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedNodeDump{} - if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &opt, &reply); err != nil { t.Fatalf("err: %s", err) } for _, info := range reply.Dump { @@ -546,6 +550,10 @@ func TestInternal_NodeDump_FilterACL(t *testing.T) { } } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. This also proves // that we respect the version 8 ACL flag, since the test server sets @@ -562,7 +570,7 @@ func TestInternal_EventFire_Token(t *testing.T) { dir, srv := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDownPolicy = "deny" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) @@ -750,6 +758,217 @@ func TestInternal_ServiceDump_Kind(t *testing.T) { }) } +func TestInternal_ServiceDump_ACL(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + dir, s := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir) + defer s.Shutdown() + codec := rpcClient(t, s) + defer codec.Close() + + testrpc.WaitForLeader(t, s.RPC, "dc1") + + registrations := []*structs.RegisterRequest{ + // Service `redis` on `node1` + { + Datacenter: "dc1", + Node: "node1", + ID: types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"), + Address: "192.18.1.1", + Service: 
&structs.NodeService{ + Kind: structs.ServiceKindTypical, + ID: "redis", + Service: "redis", + Port: 5678, + }, + Check: &structs.HealthCheck{ + Name: "redis check", + Status: api.HealthPassing, + ServiceID: "redis", + }, + }, + // Ingress gateway `igw` on `node2` + { + Datacenter: "dc1", + Node: "node2", + ID: types.NodeID("3a9d7530-20d4-443a-98d3-c10fe78f09f4"), + Address: "192.18.1.2", + Service: &structs.NodeService{ + Kind: structs.ServiceKindIngressGateway, + ID: "igw", + Service: "igw", + }, + Check: &structs.HealthCheck{ + Name: "igw check", + Status: api.HealthPassing, + ServiceID: "igw", + }, + }, + } + for _, reg := range registrations { + reg.Token = "root" + err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil) + require.NoError(t, err) + } + + { + req := structs.ConfigEntryRequest{ + Datacenter: "dc1", + Entry: &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "igw", + Listeners: []structs.IngressListener{ + { + Port: 8765, + Protocol: "tcp", + Services: []structs.IngressService{ + {Name: "redis"}, + }, + }, + }, + }, + } + req.Token = "root" + + var out bool + err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out) + require.NoError(t, err) + } + + tokenWithRules := func(t *testing.T, rules string) string { + t.Helper() + tok, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", rules) + require.NoError(t, err) + return tok.SecretID + } + + t.Run("can read all", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.NotEmpty(out.Nodes) + require.NotEmpty(out.Gateways) + 
require.False(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("cannot read service node", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node1" { + policy = "deny" + } + service "redis" { + policy = "read" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Nodes) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("cannot read service", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node1" { + policy = "read" + } + service "redis" { + policy = "deny" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Nodes) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("cannot read gateway node", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node2" { + policy = "deny" + } + service "mgw" { + policy = "read" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Gateways) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("cannot read gateway", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node2" { + 
policy = "read" + } + service "mgw" { + policy = "deny" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Gateways) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + func TestInternal_GatewayServiceDump_Terminating(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -963,7 +1182,7 @@ func TestInternal_GatewayServiceDump_Terminating_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1082,6 +1301,7 @@ func TestInternal_GatewayServiceDump_Terminating_ACL(t *testing.T) { require.Equal(t, nodes[0].Node.Node, "bar") require.Equal(t, nodes[0].Service.Service, "db") require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning) + require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") } func TestInternal_GatewayServiceDump_Ingress(t *testing.T) { @@ -1308,7 +1528,7 @@ func TestInternal_GatewayServiceDump_Ingress_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1728,6 +1948,7 @@ func TestInternal_ServiceTopology(t *testing.T) { var out structs.IndexedServiceTopology require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.False(r, out.FilteredByACLs) + require.False(r, out.QueryMeta.ResultsFilteredByACLs) require.Equal(r, "http", 
out.ServiceTopology.MetricsProtocol) // foo/api, foo/api-proxy @@ -1767,6 +1988,7 @@ func TestInternal_ServiceTopology(t *testing.T) { var out structs.IndexedServiceTopology require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.False(r, out.FilteredByACLs) + require.False(r, out.QueryMeta.ResultsFilteredByACLs) require.Equal(r, "http", out.ServiceTopology.MetricsProtocol) // edge/ingress @@ -1822,6 +2044,7 @@ func TestInternal_ServiceTopology(t *testing.T) { var out structs.IndexedServiceTopology require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.False(r, out.FilteredByACLs) + require.False(r, out.QueryMeta.ResultsFilteredByACLs) require.Equal(r, "http", out.ServiceTopology.MetricsProtocol) // foo/api, foo/api-proxy @@ -1875,6 +2098,7 @@ func TestInternal_ServiceTopology(t *testing.T) { var out structs.IndexedServiceTopology require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.False(r, out.FilteredByACLs) + require.False(r, out.QueryMeta.ResultsFilteredByACLs) require.Equal(r, "http", out.ServiceTopology.MetricsProtocol) require.Len(r, out.ServiceTopology.Upstreams, 0) @@ -1931,6 +2155,7 @@ func TestInternal_ServiceTopology_RoutingConfig(t *testing.T) { var out structs.IndexedServiceTopology require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.False(r, out.FilteredByACLs) + require.False(r, out.QueryMeta.ResultsFilteredByACLs) require.Equal(r, "http", out.ServiceTopology.MetricsProtocol) require.Empty(r, out.ServiceTopology.Downstreams) @@ -1967,7 +2192,7 @@ func TestInternal_ServiceTopology_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer 
os.RemoveAll(dir1) @@ -2010,6 +2235,7 @@ service "web" { policy = "read" } require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.True(t, out.FilteredByACLs) + require.True(t, out.QueryMeta.ResultsFilteredByACLs) require.Equal(t, "http", out.ServiceTopology.MetricsProtocol) // The web-proxy upstream gets filtered out from both bar and baz @@ -2030,6 +2256,7 @@ service "web" { policy = "read" } require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) require.True(t, out.FilteredByACLs) + require.True(t, out.QueryMeta.ResultsFilteredByACLs) require.Equal(t, "http", out.ServiceTopology.MetricsProtocol) // The redis upstream gets filtered out but the api and proxy downstream are returned @@ -2104,7 +2331,7 @@ func TestInternal_IntentionUpstreams_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/kvs_endpoint.go b/agent/consul/kvs_endpoint.go index 5f99b3667..2023ea1ea 100644 --- a/agent/consul/kvs_endpoint.go +++ b/agent/consul/kvs_endpoint.go @@ -204,7 +204,10 @@ func (k *KVS) List(args *structs.KeyRequest, reply *structs.IndexedDirEntries) e if err != nil { return err } + + total := len(ent) ent = FilterDirEnt(authz, ent) + reply.QueryMeta.ResultsFilteredByACLs = total != len(ent) if len(ent) == 0 { // Must provide non-zero index to prevent blocking @@ -263,7 +266,9 @@ func (k *KVS) ListKeys(args *structs.KeyListRequest, reply *structs.IndexedKeyLi reply.Index = index } + total := len(entries) entries = FilterDirEnt(authz, entries) + reply.QueryMeta.ResultsFilteredByACLs = total != len(entries) // Collect the keys from the filtered entries prefixLen := len(args.Prefix) diff --git a/agent/consul/kvs_endpoint_test.go 
b/agent/consul/kvs_endpoint_test.go index 9fa6e37fe..4723cfdb8 100644 --- a/agent/consul/kvs_endpoint_test.go +++ b/agent/consul/kvs_endpoint_test.go @@ -84,7 +84,7 @@ func TestKVS_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -189,7 +189,7 @@ func TestKVS_Get_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -287,6 +287,9 @@ func TestKVSEndpoint_List(t *testing.T) { t.Fatalf("bad: %v", d) } } + if dirent.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should not be true") + } // Try listing a nonexistent prefix getR.Key = "/nope" @@ -410,7 +413,7 @@ func TestKVSEndpoint_List_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -475,6 +478,9 @@ func TestKVSEndpoint_List_ACLDeny(t *testing.T) { } } } + if !dirent.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } } func TestKVSEndpoint_List_ACLEnableKeyListPolicy(t *testing.T) { @@ -486,7 +492,7 @@ func TestKVSEndpoint_List_ACLEnableKeyListPolicy(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.ACLEnableKeyListPolicy = true }) @@ -652,6 +658,9 @@ func TestKVSEndpoint_ListKeys(t *testing.T) { if dirent.Keys[2] != 
"/test/sub/" { t.Fatalf("Bad: %v", dirent.Keys) } + if dirent.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should not be true") + } // Try listing a nonexistent prefix getR.Prefix = "/nope" @@ -675,7 +684,7 @@ func TestKVSEndpoint_ListKeys_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -734,6 +743,9 @@ func TestKVSEndpoint_ListKeys_ACLDeny(t *testing.T) { if dirent.Keys[1] != "test" { t.Fatalf("Bad: %v", dirent.Keys) } + if !dirent.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } } func TestKVS_Apply_LockDelay(t *testing.T) { diff --git a/agent/consul/leader.go b/agent/consul/leader.go index c78ab4ed8..2861c6cbe 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -55,6 +55,8 @@ var ( // minCentralizedConfigVersion is the minimum Consul version in which centralized // config is supported minCentralizedConfigVersion = version.Must(version.NewVersion("1.5.0")) + + minVirtualIPVersion = version.Must(version.NewVersion("1.11.0")) ) // monitorLeadership is used to monitor if we acquire or lose our role @@ -134,13 +136,8 @@ func (s *Server) leaderLoop(stopCh chan struct{}) { // Fire a user event indicating a new leader payload := []byte(s.config.NodeName) - for name, segment := range s.LANSegments() { - if err := segment.UserEvent(newLeaderEvent, payload, false); err != nil { - s.logger.Warn("failed to broadcast new leader event on segment", - "segment", name, - "error", err, - ) - } + if err := s.LANSendUserEvent(newLeaderEvent, payload, false); err != nil { + s.logger.Warn("failed to broadcast new leader event", "error", err) } // Reconcile channel is only used once initial reconcile @@ -191,6 +188,10 @@ RECONCILE: s.logger.Error("failed to reconcile", "error", err) goto 
WAIT } + if err := s.setVirtualIPFlag(); err != nil { + s.logger.Error("failed to set virtual IP flag", "error", err) + goto WAIT + } // Initial reconcile worked, now we can process the channel // updates @@ -218,6 +219,7 @@ WAIT: goto RECONCILE case member := <-reconcileCh: s.reconcileMember(member) + s.setVirtualIPFlag() case index := <-s.tombstoneGC.ExpireCh(): go s.reapTombstones(index) case errCh := <-s.reassertLeaderCh: @@ -320,6 +322,10 @@ func (s *Server) establishLeadership(ctx context.Context) error { return err } + if err := s.setVirtualIPFlag(); err != nil { + return err + } + s.setConsistentReadReady() s.logger.Debug("successfully established leadership", "duration", time.Since(start)) @@ -425,28 +431,28 @@ func (s *Server) initializeACLs(ctx context.Context) error { s.logger.Info("Created ACL 'global-management' policy") } - // Check for configured master token. - if master := s.config.ACLMasterToken; len(master) > 0 { + // Check for configured initial management token. + if initialManagement := s.config.ACLInitialManagementToken; len(initialManagement) > 0 { state := s.fsm.State() - if _, err := uuid.ParseUUID(master); err != nil { - s.logger.Warn("Configuring a non-UUID master token is deprecated") + if _, err := uuid.ParseUUID(initialManagement); err != nil { + s.logger.Warn("Configuring a non-UUID initial management token is deprecated") } - _, token, err := state.ACLTokenGetBySecret(nil, master, nil) + _, token, err := state.ACLTokenGetBySecret(nil, initialManagement, nil) if err != nil { - return fmt.Errorf("failed to get master token: %v", err) + return fmt.Errorf("failed to get initial management token: %v", err) } // Ignoring expiration times to avoid an insertion collision. 
if token == nil { accessor, err := lib.GenerateUUID(s.checkTokenUUID) if err != nil { - return fmt.Errorf("failed to generate the accessor ID for the master token: %v", err) + return fmt.Errorf("failed to generate the accessor ID for the initial management token: %v", err) } token := structs.ACLToken{ AccessorID: accessor, - SecretID: master, - Description: "Master Token", + SecretID: initialManagement, + Description: "Initial Management Token", Policies: []structs.ACLTokenPolicyLink{ { ID: structs.ACLPolicyGlobalManagementID, @@ -466,12 +472,12 @@ func (s *Server) initializeACLs(ctx context.Context) error { ResetIndex: 0, } if _, err := s.raftApply(structs.ACLBootstrapRequestType, &req); err == nil { - s.logger.Info("Bootstrapped ACL master token from configuration") + s.logger.Info("Bootstrapped ACL initial management token from configuration") done = true } else { if err.Error() != structs.ACLBootstrapNotAllowedErr.Error() && err.Error() != structs.ACLBootstrapInvalidResetIndexErr.Error() { - return fmt.Errorf("failed to bootstrap master token: %v", err) + return fmt.Errorf("failed to bootstrap initial management token: %v", err) } } } @@ -483,10 +489,10 @@ func (s *Server) initializeACLs(ctx context.Context) error { CAS: false, } if _, err := s.raftApply(structs.ACLTokenSetRequestType, &req); err != nil { - return fmt.Errorf("failed to create master token: %v", err) + return fmt.Errorf("failed to create initial management token: %v", err) } - s.logger.Info("Created ACL master token from configuration") + s.logger.Info("Created ACL initial management token from configuration") } } } @@ -886,6 +892,25 @@ func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error { return nil } +func (s *Server) setVirtualIPFlag() error { + // Return early if the flag is already set. 
+ val, err := s.getSystemMetadata(structs.SystemMetadataVirtualIPsEnabled) + if err != nil { + return err + } + if val != "" { + return nil + } + + if ok, _ := ServersInDCMeetMinimumVersion(s, s.config.Datacenter, minVirtualIPVersion); !ok { + s.logger.Warn(fmt.Sprintf("can't allocate Virtual IPs until all servers >= %s", + minVirtualIPVersion.String())) + return nil + } + + return s.setSystemMetadataKey(structs.SystemMetadataVirtualIPsEnabled, "true") +} + // reconcileReaped is used to reconcile nodes that have failed and been reaped // from Serf but remain in the catalog. This is done by looking for unknown nodes with serfHealth checks registered. // We generate a "reap" event to cause the node to be cleaned up. @@ -1001,6 +1026,7 @@ func (s *Server) reconcileMember(member serf.Member) error { return nil } } + return nil } diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index 03b2acbc9..dc712a9e5 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -38,13 +38,15 @@ const ( // easier testing. type caServerDelegate interface { ca.ConsulProviderStateDelegate + + State() *state.Store IsLeader() bool ApplyCALeafRequest() (uint64, error) forwardDC(method, dc string, args interface{}, reply interface{}) error generateCASignRequest(csr string) *structs.CASignRequest - checkServersProvider + ServersSupportMultiDCConnectCA() error } // CAManager is a wrapper around CA operations such as updating roots, an intermediate @@ -68,10 +70,9 @@ type CAManager struct { providerRoot *structs.CARoot // stateLock protects the internal state used for administrative CA tasks. - stateLock sync.Mutex - state caState - primaryRoots structs.IndexedCARoots // The most recently seen state of the root CAs from the primary datacenter. - actingSecondaryCA bool // True if this datacenter has been initialized as a secondary CA. 
+ stateLock sync.Mutex + state caState + primaryRoots structs.IndexedCARoots // The most recently seen state of the root CAs from the primary datacenter. leaderRoutineManager *routine.Manager // providerShim is used to test CAManager with a fake provider. @@ -127,6 +128,22 @@ func (c *caDelegateWithState) generateCASignRequest(csr string) *structs.CASignR } } +func (c *caDelegateWithState) ServersSupportMultiDCConnectCA() error { + versionOk, primaryFound := ServersInDCMeetMinimumVersion(c.Server, c.Server.config.PrimaryDatacenter, minMultiDCConnectVersion) + if !primaryFound { + return fmt.Errorf("primary datacenter is unreachable") + } + if !versionOk { + return fmt.Errorf("all servers in the primary datacenter are not at the minimum version %v", minMultiDCConnectVersion) + } + return nil +} + +func (c *caDelegateWithState) ProviderState(id string) (*structs.CAConsulProviderState, error) { + _, s, err := c.fsm.State().CAProviderState(id) + return s, err +} + func NewCAManager(delegate caServerDelegate, leaderRoutineManager *routine.Manager, logger hclog.Logger, config *Config) *CAManager { return &CAManager{ delegate: delegate, @@ -173,19 +190,15 @@ func (e *caStateError) Error() string { } // secondarySetPrimaryRoots updates the most recently seen roots from the primary. 
-func (c *CAManager) secondarySetPrimaryRoots(newRoots structs.IndexedCARoots) error { +func (c *CAManager) secondarySetPrimaryRoots(newRoots structs.IndexedCARoots) { + // TODO: this could be a different lock, as long as its the same lock in secondaryGetPrimaryRoots c.stateLock.Lock() defer c.stateLock.Unlock() - - if c.state == caStateInitializing || c.state == caStateReconfig { - c.primaryRoots = newRoots - } else { - return fmt.Errorf("Cannot update primary roots in state %q", c.state) - } - return nil + c.primaryRoots = newRoots } func (c *CAManager) secondaryGetPrimaryRoots() structs.IndexedCARoots { + // TODO: this could be a different lock, as long as its the same lock in secondarySetPrimaryRoots c.stateLock.Lock() defer c.stateLock.Unlock() return c.primaryRoots @@ -202,7 +215,8 @@ func (c *CAManager) initializeCAConfig() (*structs.CAConfiguration, error) { } if config == nil { config = c.serverConf.CAConfig - if config.ClusterID == "" { + + if c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter && config.ClusterID == "" { id, err := uuid.GenerateUUID() if err != nil { return nil, err @@ -226,12 +240,9 @@ func (c *CAManager) initializeCAConfig() (*structs.CAConfiguration, error) { Op: structs.CAOpSetConfig, Config: config, } - if resp, err := c.delegate.ApplyCARequest(&req); err != nil { + if _, err := c.delegate.ApplyCARequest(&req); err != nil { return nil, err - } else if respErr, ok := resp.(error); ok { - return nil, respErr } - return config, nil } @@ -300,7 +311,7 @@ func (c *CAManager) Start(ctx context.Context) { // Attempt to initialize the Connect CA now. This will // happen during leader establishment and it would be great // if the CA was ready to go once that process was finished. 
- if err := c.InitializeCA(); err != nil { + if err := c.Initialize(); err != nil { c.logger.Error("Failed to initialize Connect CA", "error", err) // we failed to fully initialize the CA so we need to spawn a @@ -327,7 +338,6 @@ func (c *CAManager) Stop() { c.setState(caStateUninitialized, false) c.primaryRoots = structs.IndexedCARoots{} - c.actingSecondaryCA = false c.setCAProvider(nil, nil) } @@ -341,7 +351,7 @@ func (c *CAManager) startPostInitializeRoutines(ctx context.Context) { } func (c *CAManager) backgroundCAInitialization(ctx context.Context) error { - retryLoopBackoffAbortOnSuccess(ctx, c.InitializeCA, func(err error) { + retryLoopBackoffAbortOnSuccess(ctx, c.Initialize, func(err error) { c.logger.Error("Failed to initialize Connect CA", "routine", backgroundCAInitializationRoutineName, "error", err, @@ -358,10 +368,10 @@ func (c *CAManager) backgroundCAInitialization(ctx context.Context) error { return nil } -// InitializeCA sets up the CA provider when gaining leadership, either bootstrapping +// Initialize sets up the CA provider when gaining leadership, either bootstrapping // the CA if this is the primary DC or making a remote RPC for intermediate signing // if this is a secondary DC. -func (c *CAManager) InitializeCA() (reterr error) { +func (c *CAManager) Initialize() (reterr error) { // Bail if connect isn't enabled. 
if !c.serverConf.ConnectEnabled { return nil @@ -408,18 +418,8 @@ func (c *CAManager) InitializeCA() (reterr error) { } func (c *CAManager) secondaryInitialize(provider ca.Provider, conf *structs.CAConfiguration) error { - // If this isn't the primary DC, run the secondary DC routine if the primary has already been upgraded to at least 1.6.0 - versionOk, foundPrimary := ServersInDCMeetMinimumVersion(c.delegate, c.serverConf.PrimaryDatacenter, minMultiDCConnectVersion) - if !foundPrimary { - c.logger.Warn("primary datacenter is configured but unreachable - deferring initialization of the secondary datacenter CA") - // return nil because we will initialize the secondary CA later - return nil - } else if !versionOk { - // return nil because we will initialize the secondary CA later - c.logger.Warn("servers in the primary datacenter are not at least at the minimum version - deferring initialization of the secondary datacenter CA", - "min_version", minMultiDCConnectVersion.String(), - ) - return nil + if err := c.delegate.ServersSupportMultiDCConnectCA(); err != nil { + return fmt.Errorf("initialization will be deferred: %w", err) } // Get the root CA to see if we need to refresh our intermediate. @@ -430,9 +430,7 @@ func (c *CAManager) secondaryInitialize(provider ca.Provider, conf *structs.CACo if err := c.delegate.forwardDC("ConnectCA.Roots", c.serverConf.PrimaryDatacenter, &args, &roots); err != nil { return err } - if err := c.secondarySetPrimaryRoots(roots); err != nil { - return err - } + c.secondarySetPrimaryRoots(roots) // Configure the CA provider and initialize the intermediate certificate if necessary. if err := c.secondaryInitializeProvider(provider, roots); err != nil { @@ -519,12 +517,26 @@ func (c *CAManager) primaryInitialize(provider ca.Provider, conf *structs.CAConf } } + var rootUpdateRequired bool + // Versions prior to 1.9.3, 1.8.8, and 1.7.12 incorrectly used the primary // rootCA's subjectKeyID here instead of the intermediate. 
For // provider=consul this didn't matter since there are no intermediates in // the primaryDC, but for vault it does matter. expectedSigningKeyID := connect.EncodeSigningKeyID(intermediateCert.SubjectKeyId) - needsSigningKeyUpdate := (rootCA.SigningKeyID != expectedSigningKeyID) + if rootCA.SigningKeyID != expectedSigningKeyID { + c.logger.Info("Correcting stored CARoot values", + "previous-signing-key", rootCA.SigningKeyID, "updated-signing-key", expectedSigningKeyID) + rootCA.SigningKeyID = expectedSigningKeyID + rootUpdateRequired = true + } + + // Add the local leaf signing cert to the rootCA struct. This handles both + // upgrades of existing state, and new rootCA. + if c.getLeafSigningCertFromRoot(rootCA) != interPEM { + rootCA.IntermediateCerts = append(rootCA.IntermediateCerts, interPEM) + rootUpdateRequired = true + } // Check if the CA root is already initialized and exit if it is, // adding on any existing intermediate certs since they aren't directly @@ -536,26 +548,21 @@ func (c *CAManager) primaryInitialize(provider ca.Provider, conf *structs.CAConf if err != nil { return err } - if activeRoot != nil && needsSigningKeyUpdate { - c.logger.Info("Correcting stored SigningKeyID value", "previous", rootCA.SigningKeyID, "updated", expectedSigningKeyID) - - } else if activeRoot != nil && !needsSigningKeyUpdate { + if activeRoot != nil && !rootUpdateRequired { // This state shouldn't be possible to get into because we update the root and // CA config in the same FSM operation. if activeRoot.ID != rootCA.ID { return fmt.Errorf("stored CA root %q is not the active root (%s)", rootCA.ID, activeRoot.ID) } + // TODO: why doesn't this c.setCAProvider(provider, activeRoot) ? 
rootCA.IntermediateCerts = activeRoot.IntermediateCerts c.setCAProvider(provider, rootCA) + c.logger.Info("initialized primary datacenter CA from existing CARoot with provider", "provider", conf.Provider) return nil } - if needsSigningKeyUpdate { - rootCA.SigningKeyID = expectedSigningKeyID - } - // Get the highest index idx, _, err := state.CARoots(nil) if err != nil { @@ -563,17 +570,13 @@ func (c *CAManager) primaryInitialize(provider ca.Provider, conf *structs.CAConf } // Store the root cert in raft - resp, err := c.delegate.ApplyCARequest(&structs.CARequest{ + _, err = c.delegate.ApplyCARequest(&structs.CARequest{ Op: structs.CAOpSetRoots, Index: idx, Roots: []*structs.CARoot{rootCA}, }) if err != nil { - c.logger.Error("Raft apply failed", "error", err) - return err - } - if respErr, ok := resp.(error); ok { - return respErr + return fmt.Errorf("raft apply failed: %w", err) } c.setCAProvider(provider, rootCA) @@ -583,6 +586,22 @@ func (c *CAManager) primaryInitialize(provider ca.Provider, conf *structs.CAConf return nil } +// getLeafSigningCertFromRoot returns the PEM encoded certificate that should be used to +// sign leaf certificates in the local datacenter. The SubjectKeyId of the +// returned cert should always match the SigningKeyID of the CARoot. +// +// TODO: fix the data model so that we don't need this complicated lookup to +// find the leaf signing cert. See github.com/hashicorp/consul/issues/11347. +func (c *CAManager) getLeafSigningCertFromRoot(root *structs.CARoot) string { + if !c.isIntermediateUsedToSignLeaf() { + return root.RootCert + } + if len(root.IntermediateCerts) == 0 { + return "" + } + return root.IntermediateCerts[len(root.IntermediateCerts)-1] +} + // secondaryInitializeIntermediateCA runs the routine for generating an intermediate CA CSR and getting // it signed by the primary DC if the root CA of the primary DC has changed since the last // intermediate. 
It should only be called while the state lock is held by setting the state @@ -775,9 +794,6 @@ func (c *CAManager) persistNewRootAndConfig(provider ca.Provider, newActiveRoot if err != nil { return err } - if respErr, ok := resp.(error); ok { - return respErr - } if respOk, ok := resp.(bool); ok && !respOk { return fmt.Errorf("could not atomically update roots and config") } @@ -804,7 +820,7 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error) } }() - // Attempt to initialize the config if we failed to do so in InitializeCA for some reason + // Attempt to initialize the config if we failed to do so in Initialize for some reason _, err = c.initializeCAConfig() if err != nil { return err @@ -916,13 +932,10 @@ func (c *CAManager) primaryUpdateRootCA(newProvider ca.Provider, args *structs.C // If the root didn't change, just update the config and return. if root != nil && root.ID == newActiveRoot.ID { args.Op = structs.CAOpSetConfig - resp, err := c.delegate.ApplyCARequest(args) + _, err := c.delegate.ApplyCARequest(args) if err != nil { return err } - if respErr, ok := resp.(error); ok { - return respErr - } // If the config has been committed, update the local provider instance c.setCAProvider(newProvider, newActiveRoot) @@ -992,7 +1005,9 @@ func (c *CAManager) primaryUpdateRootCA(newProvider ca.Provider, args *structs.C return err } if intermediate != newRootPEM { - newActiveRoot.IntermediateCerts = append(newActiveRoot.IntermediateCerts, intermediate) + if err := setLeafSigningCert(newActiveRoot, intermediate); err != nil { + return err + } } // Update the roots and CA config in the state store at the same time @@ -1020,9 +1035,6 @@ func (c *CAManager) primaryUpdateRootCA(newProvider ca.Provider, args *structs.C if err != nil { return err } - if respErr, ok := resp.(error); ok { - return respErr - } if respOk, ok := resp.(bool); ok && !respOk { return fmt.Errorf("could not atomically update roots and config") } @@ -1050,16 +1062,10 @@ 
func (c *CAManager) primaryRenewIntermediate(provider ca.Provider, newActiveRoot return fmt.Errorf("error generating new intermediate cert: %v", err) } - intermediateCert, err := connect.ParseCert(intermediatePEM) - if err != nil { - return fmt.Errorf("error parsing intermediate cert: %v", err) + if err := setLeafSigningCert(newActiveRoot, intermediatePEM); err != nil { + return err } - // Append the new intermediate to our local active root entry. This is - // where the root representations start to diverge. - newActiveRoot.IntermediateCerts = append(newActiveRoot.IntermediateCerts, intermediatePEM) - newActiveRoot.SigningKeyID = connect.EncodeSigningKeyID(intermediateCert.SubjectKeyId) - c.logger.Info("generated new intermediate certificate for primary datacenter") return nil } @@ -1083,20 +1089,28 @@ func (c *CAManager) secondaryRenewIntermediate(provider ca.Provider, newActiveRo return fmt.Errorf("Failed to set the intermediate certificate with the CA provider: %v", err) } - intermediateCert, err := connect.ParseCert(intermediatePEM) - if err != nil { - return fmt.Errorf("error parsing intermediate cert: %v", err) + if err := setLeafSigningCert(newActiveRoot, intermediatePEM); err != nil { + return err } - // Append the new intermediate to our local active root entry. This is - // where the root representations start to diverge. - newActiveRoot.IntermediateCerts = append(newActiveRoot.IntermediateCerts, intermediatePEM) - newActiveRoot.SigningKeyID = connect.EncodeSigningKeyID(intermediateCert.SubjectKeyId) - c.logger.Info("received new intermediate certificate from primary datacenter") return nil } +// setLeafSigningCert updates the CARoot by appending the pem to the list of +// intermediate certificates, and setting the SigningKeyID to the encoded +// SubjectKeyId of the certificate. 
+func setLeafSigningCert(caRoot *structs.CARoot, pem string) error { + cert, err := connect.ParseCert(pem) + if err != nil { + return fmt.Errorf("error parsing leaf signing cert: %w", err) + } + + caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem) + caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId) + return nil +} + // intermediateCertRenewalWatch periodically attempts to renew the intermediate cert. func (c *CAManager) intermediateCertRenewalWatch(ctx context.Context) error { isPrimary := c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter @@ -1135,7 +1149,7 @@ func (c *CAManager) RenewIntermediate(ctx context.Context, isPrimary bool) error return nil } // If this isn't the primary, make sure the CA has been initialized. - if !isPrimary && !c.secondaryIsCAConfigured() { + if !isPrimary && !c.secondaryHasProviderRoots() { return fmt.Errorf("secondary CA is not yet configured.") } @@ -1148,10 +1162,8 @@ func (c *CAManager) RenewIntermediate(ctx context.Context, isPrimary bool) error // If this is the primary, check if this is a provider that uses an intermediate cert. If // it isn't, we don't need to check for a renewal. - if isPrimary { - if _, ok := provider.(ca.PrimaryUsesIntermediate); !ok { - return nil - } + if isPrimary && !primaryUsesIntermediate(provider) { + return nil } activeIntermediate, err := provider.ActiveIntermediate() @@ -1254,38 +1266,35 @@ func (c *CAManager) secondaryUpdateRoots(roots structs.IndexedCARoots) error { defer c.setState(caStateInitialized, false) // Update the cached primary roots now that the lock is held. - if err := c.secondarySetPrimaryRoots(roots); err != nil { - return err - } + c.secondarySetPrimaryRoots(roots) - // Check to see if the primary has been upgraded in case we're waiting to switch to - // secondary mode. 
provider, _ := c.getCAProvider() if provider == nil { // this happens when leadership is being revoked and this go routine will be stopped return nil } - if !c.secondaryIsCAConfigured() { - versionOk, primaryFound := ServersInDCMeetMinimumVersion(c.delegate, c.serverConf.PrimaryDatacenter, minMultiDCConnectVersion) - if !primaryFound { - return fmt.Errorf("Primary datacenter is unreachable - deferring secondary CA initialization") - } - - if versionOk { - if err := c.secondaryInitializeProvider(provider, roots); err != nil { - return fmt.Errorf("Failed to initialize secondary CA provider: %v", err) - } - } - } // Run the secondary CA init routine to see if we need to request a new // intermediate. - if c.secondaryIsCAConfigured() { + if c.secondaryHasProviderRoots() { if err := c.secondaryInitializeIntermediateCA(provider, nil); err != nil { return fmt.Errorf("Failed to initialize the secondary CA: %v", err) } + return nil } + // Attempt to initialize now that we have updated roots. This is an optimization + // so that we don't have to wait for the Initialize retry backoff if we were + // waiting on roots from the primary to be able to complete initialization. + if err := c.delegate.ServersSupportMultiDCConnectCA(); err != nil { + return fmt.Errorf("failed to initialize while updating primary roots: %w", err) + } + if err := c.secondaryInitializeProvider(provider, roots); err != nil { + return fmt.Errorf("Failed to initialize secondary CA provider: %v", err) + } + if err := c.secondaryInitializeIntermediateCA(provider, nil); err != nil { + return fmt.Errorf("Failed to initialize the secondary CA: %v", err) + } return nil } @@ -1311,29 +1320,17 @@ func (c *CAManager) secondaryInitializeProvider(provider ca.Provider, roots stru if err := provider.Configure(pCfg); err != nil { return fmt.Errorf("error configuring provider: %v", err) } - - return c.secondarySetCAConfigured() -} - -// secondarySetCAConfigured sets the flag for acting as a secondary CA to true. 
-func (c *CAManager) secondarySetCAConfigured() error { - c.stateLock.Lock() - defer c.stateLock.Unlock() - - if c.state == caStateInitializing || c.state == caStateReconfig { - c.actingSecondaryCA = true - } else { - return fmt.Errorf("Cannot update secondary CA flag in state %q", c.state) - } - return nil } -// secondaryIsCAConfigured returns true if we have been initialized as a secondary datacenter's CA. -func (c *CAManager) secondaryIsCAConfigured() bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - return c.actingSecondaryCA +// secondaryHasProviderRoots returns true after providerRoot has been set. This +// method is used to detect when the secondary has received the roots from the +// primary DC. +func (c *CAManager) secondaryHasProviderRoots() bool { + // TODO: this could potentially also use primaryRoots instead of providerRoot + c.providerLock.Lock() + defer c.providerLock.Unlock() + return c.providerRoot != nil } type connectSignRateLimiter struct { @@ -1403,7 +1400,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne if err != nil { return nil, err } - signingID := connect.SpiffeIDSigningForCluster(config) + signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) serviceID, isService := spiffeID.(*connect.SpiffeIDService) agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent) if !isService && !isAgent { @@ -1550,3 +1547,16 @@ func (c *CAManager) checkExpired(pem string) error { } return nil } + +func primaryUsesIntermediate(provider ca.Provider) bool { + _, ok := provider.(ca.PrimaryUsesIntermediate) + return ok +} + +func (c *CAManager) isIntermediateUsedToSignLeaf() bool { + if c.serverConf.Datacenter != c.serverConf.PrimaryDatacenter { + return true + } + provider, _ := c.getCAProvider() + return primaryUsesIntermediate(provider) +} diff --git a/agent/consul/leader_connect_ca_test.go b/agent/consul/leader_connect_ca_test.go index c69373e51..846144460 100644 --- a/agent/consul/leader_connect_ca_test.go 
+++ b/agent/consul/leader_connect_ca_test.go @@ -14,18 +14,18 @@ import ( "testing" "time" - "github.com/hashicorp/go-version" - "github.com/hashicorp/serf/serf" + "github.com/hashicorp/consul/testrpc" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/connect" ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/consul/state" - "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" ) // TODO(kyhavlov): replace with t.Deadline() @@ -56,16 +56,17 @@ func (m *mockCAServerDelegate) State() *state.Store { return m.store } +func (m *mockCAServerDelegate) ProviderState(id string) (*structs.CAConsulProviderState, error) { + _, s, err := m.store.CAProviderState(id) + return s, err +} + func (m *mockCAServerDelegate) IsLeader() bool { return true } -func (m *mockCAServerDelegate) CheckServers(datacenter string, fn func(*metadata.Server) bool) { - ver, _ := version.NewVersion("1.6.0") - fn(&metadata.Server{ - Status: serf.StatusAlive, - Build: *ver, - }) +func (m *mockCAServerDelegate) ServersSupportMultiDCConnectCA() error { + return nil } func (m *mockCAServerDelegate) ApplyCALeafRequest() (uint64, error) { @@ -223,7 +224,7 @@ func initTestManager(t *testing.T, manager *CAManager, delegate *mockCAServerDel t.Helper() initCh := make(chan struct{}) go func() { - require.NoError(t, manager.InitializeCA()) + require.NoError(t, manager.Initialize()) close(initCh) }() for i := 0; i < 5; i++ { @@ -253,12 +254,12 @@ func TestCAManager_Initialize(t *testing.T) { rootPEM: delegate.primaryRoot.RootCert, } - // Call InitializeCA and then confirm the RPCs and provider calls + // Call Initialize and then confirm the RPCs and provider calls // happen in the expected order. 
require.Equal(t, caStateUninitialized, manager.state) errCh := make(chan error) go func() { - err := manager.InitializeCA() + err := manager.Initialize() assert.NoError(t, err) errCh <- err }() @@ -271,7 +272,7 @@ func TestCAManager_Initialize(t *testing.T) { waitForCh(t, delegate.callbackCh, "raftApply/ConnectCA") waitForEmptyCh(t, delegate.callbackCh) - // Make sure the InitializeCA call returned successfully. + // Make sure the Initialize call returned successfully. select { case err := <-errCh: require.NoError(t, err) @@ -464,3 +465,89 @@ func TestCADelegateWithState_GenerateCASignRequest(t *testing.T) { req := d.generateCASignRequest("A") require.Equal(t, "east", req.RequestDatacenter()) } + +func TestCAManager_Initialize_Logging(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + _, conf1 := testServerConfig(t) + + // Setup dummy logger to catch output + var buf bytes.Buffer + logger := testutil.LoggerWithOutput(t, &buf) + + deps := newDefaultDeps(t, conf1) + deps.Logger = logger + + s1, err := NewServer(conf1, deps) + require.NoError(t, err) + defer s1.Shutdown() + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Wait til CA root is setup + retry.Run(t, func(r *retry.R) { + var out structs.IndexedCARoots + r.Check(s1.RPC("ConnectCA.Roots", structs.DCSpecificRequest{ + Datacenter: conf1.Datacenter, + }, &out)) + }) + + require.Contains(t, buf.String(), "consul CA provider configured") +} + +func TestCAManager_UpdateConfiguration_Vault_Primary(t *testing.T) { + ca.SkipIfVaultNotPresent(t) + vault := ca.NewTestVaultServer(t) + + _, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.CAConfig = &structs.CAConfiguration{ + Provider: "vault", + Config: map[string]interface{}{ + "Address": vault.Addr, + "Token": vault.RootToken, + "RootPKIPath": "pki-root/", + "IntermediatePKIPath": "pki-intermediate/", + }, + } + }) + defer func() { + s1.Shutdown() + s1.leaderRoutineManager.Wait() + }() + 
+ testrpc.WaitForLeader(t, s1.RPC, "dc1") + + _, origRoot, err := s1.fsm.State().CARootActive(nil) + require.NoError(t, err) + require.Len(t, origRoot.IntermediateCerts, 1) + + cert, err := connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(origRoot)) + require.NoError(t, err) + require.Equal(t, connect.HexString(cert.SubjectKeyId), origRoot.SigningKeyID) + + err = s1.caManager.UpdateConfiguration(&structs.CARequest{ + Config: &structs.CAConfiguration{ + Provider: "vault", + Config: map[string]interface{}{ + "Address": vault.Addr, + "Token": vault.RootToken, + "RootPKIPath": "pki-root-2/", + "IntermediatePKIPath": "pki-intermediate-2/", + }, + }, + }) + require.NoError(t, err) + + _, newRoot, err := s1.fsm.State().CARootActive(nil) + require.NoError(t, err) + require.Len(t, newRoot.IntermediateCerts, 2, + "expected one cross-sign cert and one local leaf sign cert") + require.NotEqual(t, origRoot.ID, newRoot.ID) + + cert, err = connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(newRoot)) + require.NoError(t, err) + require.Equal(t, connect.HexString(cert.SubjectKeyId), newRoot.SigningKeyID) +} diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index 0151d068c..abaa45b6b 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -24,7 +24,7 @@ import ( "github.com/hashicorp/consul/testrpc" ) -func TestLeader_Builtin_PrimaryCA_ChangeKeyConfig(t *testing.T) { +func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -175,7 +175,7 @@ func TestLeader_Builtin_PrimaryCA_ChangeKeyConfig(t *testing.T) { } -func TestLeader_SecondaryCA_Initialize(t *testing.T) { +func TestCAManager_Initialize_Secondary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -204,7 +204,7 @@ func TestLeader_SecondaryCA_Initialize(t *testing.T) { c.PrimaryDatacenter = "primary" c.Build = "1.6.0" c.ACLsEnabled 
= true - c.ACLMasterToken = masterToken + c.ACLInitialManagementToken = masterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.CAConfig.Config["PrivateKeyType"] = tc.keyType c.CAConfig.Config["PrivateKeyBits"] = tc.keyBits @@ -330,7 +330,7 @@ func getCAProviderWithLock(s *Server) (ca.Provider, *structs.CARoot) { return s.caManager.getCAProvider() } -func TestLeader_Vault_PrimaryCA_IntermediateRenew(t *testing.T) { +func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -396,23 +396,34 @@ func TestLeader_Vault_PrimaryCA_IntermediateRenew(t *testing.T) { provider, _ := getCAProviderWithLock(s1) intermediatePEM, err := provider.ActiveIntermediate() require.NoError(err) - _, err = connect.ParseCert(intermediatePEM) + intermediateCert, err := connect.ParseCert(intermediatePEM) require.NoError(err) + // Check that the state store has the correct intermediate + store := s1.caManager.delegate.State() + _, activeRoot, err := store.CARootActive(nil) + require.NoError(err) + require.Equal(intermediatePEM, s1.caManager.getLeafSigningCertFromRoot(activeRoot)) + require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID) + // Wait for dc1's intermediate to be refreshed. // It is possible that test fails when the blocking query doesn't return. 
retry.Run(t, func(r *retry.R) { provider, _ = getCAProviderWithLock(s1) newIntermediatePEM, err := provider.ActiveIntermediate() r.Check(err) - _, err = connect.ParseCert(intermediatePEM) - r.Check(err) if newIntermediatePEM == intermediatePEM { r.Fatal("not a renewed intermediate") } + intermediateCert, err = connect.ParseCert(newIntermediatePEM) + r.Check(err) intermediatePEM = newIntermediatePEM }) + + _, activeRoot, err = store.CARootActive(nil) require.NoError(err) + require.Equal(intermediatePEM, s1.caManager.getLeafSigningCertFromRoot(activeRoot)) + require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID) // Get the root from dc1 and validate a chain of: // dc1 leaf -> dc1 intermediate -> dc1 root @@ -439,6 +450,8 @@ func TestLeader_Vault_PrimaryCA_IntermediateRenew(t *testing.T) { // Check that the leaf signed by the new intermediate can be verified using the // returned cert chain (signed intermediate + remote root). intermediatePool := x509.NewCertPool() + // TODO: do not explicitly add the intermediatePEM, we should have it available + // from leafPEM. Use connect.ParseLeafCerts to do the right thing. 
intermediatePool.AppendCertsFromPEM([]byte(intermediatePEM)) rootPool := x509.NewCertPool() rootPool.AppendCertsFromPEM([]byte(caRoot.RootCert)) @@ -450,7 +463,7 @@ func TestLeader_Vault_PrimaryCA_IntermediateRenew(t *testing.T) { require.NoError(err) } -func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) { +func TestCAManager_RenewIntermediate_Secondary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -515,10 +528,10 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) { secondaryProvider, _ := getCAProviderWithLock(s2) intermediatePEM, err := secondaryProvider.ActiveIntermediate() require.NoError(err) - cert, err := connect.ParseCert(intermediatePEM) + intermediateCert, err := connect.ParseCert(intermediatePEM) require.NoError(err) - currentCertSerialNumber := cert.SerialNumber - currentCertAuthorityKeyId := cert.AuthorityKeyId + currentCertSerialNumber := intermediateCert.SerialNumber + currentCertAuthorityKeyId := intermediateCert.AuthorityKeyId // Capture the current root var originalRoot *structs.CARoot @@ -532,6 +545,12 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) { waitForActiveCARoot(t, s1, originalRoot) waitForActiveCARoot(t, s2, originalRoot) + store := s2.fsm.State() + _, activeRoot, err := store.CARootActive(nil) + require.NoError(err) + require.Equal(intermediatePEM, s2.caManager.getLeafSigningCertFromRoot(activeRoot)) + require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID) + // Wait for dc2's intermediate to be refreshed. // It is possible that test fails when the blocking query doesn't return. 
// When https://github.com/hashicorp/consul/pull/3777 is merged @@ -548,8 +567,13 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) { currentCertAuthorityKeyId = cert.AuthorityKeyId r.Fatal("not a renewed intermediate") } + intermediateCert = cert }) + + _, activeRoot, err = store.CARootActive(nil) require.NoError(err) + require.Equal(intermediatePEM, s2.caManager.getLeafSigningCertFromRoot(activeRoot)) + require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID) // Get the root from dc1 and validate a chain of: // dc2 leaf -> dc2 intermediate -> dc1 root @@ -570,24 +594,26 @@ func TestLeader_SecondaryCA_IntermediateRenew(t *testing.T) { leafPEM, err := secondaryProvider.Sign(leafCsr) require.NoError(err) - cert, err = connect.ParseCert(leafPEM) + intermediateCert, err = connect.ParseCert(leafPEM) require.NoError(err) // Check that the leaf signed by the new intermediate can be verified using the // returned cert chain (signed intermediate + remote root). intermediatePool := x509.NewCertPool() + // TODO: do not explicitly add the intermediatePEM, we should have it available + // from leafPEM. Use connect.ParseLeafCerts to do the right thing. 
intermediatePool.AppendCertsFromPEM([]byte(intermediatePEM)) rootPool := x509.NewCertPool() rootPool.AppendCertsFromPEM([]byte(caRoot.RootCert)) - _, err = cert.Verify(x509.VerifyOptions{ + _, err = intermediateCert.Verify(x509.VerifyOptions{ Intermediates: intermediatePool, Roots: rootPool, }) require.NoError(err) } -func TestLeader_SecondaryCA_IntermediateRefresh(t *testing.T) { +func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -738,7 +764,7 @@ func TestLeader_SecondaryCA_IntermediateRefresh(t *testing.T) { require.NoError(err) } -func TestLeader_Vault_PrimaryCA_FixSigningKeyID_OnRestart(t *testing.T) { +func TestCAManager_Initialize_Vault_FixesSigningKeyID_Primary(t *testing.T) { ca.SkipIfVaultNotPresent(t) if testing.Short() { @@ -840,7 +866,7 @@ func TestLeader_Vault_PrimaryCA_FixSigningKeyID_OnRestart(t *testing.T) { }) } -func TestLeader_SecondaryCA_FixSigningKeyID_via_IntermediateRefresh(t *testing.T) { +func TestCAManager_Initialize_FixesSigningKeyID_Secondary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -941,7 +967,7 @@ func TestLeader_SecondaryCA_FixSigningKeyID_via_IntermediateRefresh(t *testing.T }) } -func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) { +func TestCAManager_Initialize_TransitionFromPrimaryToSecondary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -1033,7 +1059,7 @@ func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) { }) } -func TestLeader_SecondaryCA_UpgradeBeforePrimary(t *testing.T) { +func TestCAManager_Initialize_SecondaryBeforePrimary(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -1094,7 +1120,6 @@ func TestLeader_SecondaryCA_UpgradeBeforePrimary(t *testing.T) { // Wait for the secondary transition to happen and then verify the secondary DC // has both roots present. 
- secondaryProvider, _ := getCAProviderWithLock(s2) retry.Run(t, func(r *retry.R) { state1 := s1.fsm.State() _, roots1, err := state1.CARoots(nil) @@ -1110,15 +1135,18 @@ func TestLeader_SecondaryCA_UpgradeBeforePrimary(t *testing.T) { require.Equal(r, roots1[0].ID, roots2[0].ID) require.Equal(r, roots1[0].RootCert, roots2[0].RootCert) + secondaryProvider, _ := getCAProviderWithLock(s2) inter, err := secondaryProvider.ActiveIntermediate() require.NoError(r, err) require.NotEmpty(r, inter, "should have valid intermediate") }) - _, caRoot := getCAProviderWithLock(s1) + secondaryProvider, _ := getCAProviderWithLock(s2) intermediatePEM, err := secondaryProvider.ActiveIntermediate() require.NoError(t, err) + _, caRoot := getCAProviderWithLock(s1) + // Have dc2 sign a leaf cert and make sure the chain is correct. spiffeService := &connect.SpiffeIDService{ Host: "node1", @@ -1240,7 +1268,7 @@ func TestLeader_CARootPruning(t *testing.T) { require.NotEqual(roots[0].ID, oldRoot.ID) } -func TestLeader_PersistIntermediateCAs(t *testing.T) { +func TestConnectCA_ConfigurationSet_PersistsRoots(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -1323,7 +1351,7 @@ func TestLeader_PersistIntermediateCAs(t *testing.T) { }) } -func TestLeader_ParseCARoot(t *testing.T) { +func TestParseCARoot(t *testing.T) { type test struct { name string pem string @@ -1406,7 +1434,7 @@ func readTestData(t *testing.T, name string) string { return string(bs) } -func TestLeader_lessThanHalfTimePassed(t *testing.T) { +func TestLessThanHalfTimePassed(t *testing.T) { now := time.Now() require.False(t, lessThanHalfTimePassed(now, now.Add(-10*time.Second), now.Add(-5*time.Second))) require.False(t, lessThanHalfTimePassed(now, now.Add(-10*time.Second), now)) @@ -1416,7 +1444,7 @@ func TestLeader_lessThanHalfTimePassed(t *testing.T) { require.True(t, lessThanHalfTimePassed(now, now.Add(-10*time.Second), now.Add(20*time.Second))) } -func TestLeader_retryLoopBackoffHandleSuccess(t 
*testing.T) { +func TestRetryLoopBackoffHandleSuccess(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } @@ -1460,7 +1488,7 @@ func TestLeader_retryLoopBackoffHandleSuccess(t *testing.T) { } } -func TestLeader_Vault_BadCAConfigShouldntPreventLeaderEstablishment(t *testing.T) { +func TestCAManager_Initialize_Vault_BadCAConfigDoesNotPreventLeaderEstablishment(t *testing.T) { ca.SkipIfVaultNotPresent(t) testVault := ca.NewTestVaultServer(t) @@ -1517,7 +1545,7 @@ func TestLeader_Vault_BadCAConfigShouldntPreventLeaderEstablishment(t *testing.T require.NotNil(t, activeRoot) } -func TestLeader_Consul_BadCAConfigShouldntPreventLeaderEstablishment(t *testing.T) { +func TestCAManager_Initialize_BadCAConfigDoesNotPreventLeaderEstablishment(t *testing.T) { ca.SkipIfVaultNotPresent(t) _, s1 := testServerWithConfig(t, func(c *Config) { @@ -1561,7 +1589,7 @@ func TestLeader_Consul_BadCAConfigShouldntPreventLeaderEstablishment(t *testing. require.NotNil(t, activeRoot) } -func TestLeader_Consul_ForceWithoutCrossSigning(t *testing.T) { +func TestConnectCA_ConfigurationSet_ForceWithoutCrossSigning(t *testing.T) { require := require.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) @@ -1617,7 +1645,7 @@ func TestLeader_Consul_ForceWithoutCrossSigning(t *testing.T) { } } -func TestLeader_Vault_ForceWithoutCrossSigning(t *testing.T) { +func TestConnectCA_ConfigurationSet_Vault_ForceWithoutCrossSigning(t *testing.T) { ca.SkipIfVaultNotPresent(t) require := require.New(t) diff --git a/agent/consul/leader_federation_state_ae_test.go b/agent/consul/leader_federation_state_ae_test.go index 402fe2241..d7f6d108f 100644 --- a/agent/consul/leader_federation_state_ae_test.go +++ b/agent/consul/leader_federation_state_ae_test.go @@ -359,7 +359,7 @@ func TestLeader_FederationStateAntiEntropyPruning_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" 
c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -373,7 +373,7 @@ func TestLeader_FederationStateAntiEntropyPruning_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir2) diff --git a/agent/consul/leader_intentions_oss.go b/agent/consul/leader_intentions_oss.go index a2eefdad3..db9c742bd 100644 --- a/agent/consul/leader_intentions_oss.go +++ b/agent/consul/leader_intentions_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/leader_intentions_oss_test.go b/agent/consul/leader_intentions_oss_test.go index 58517e403..ea9b8b6a4 100644 --- a/agent/consul/leader_intentions_oss_test.go +++ b/agent/consul/leader_intentions_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/leader_intentions_test.go b/agent/consul/leader_intentions_test.go index 79f1d771e..363c2036c 100644 --- a/agent/consul/leader_intentions_test.go +++ b/agent/consul/leader_intentions_test.go @@ -29,7 +29,7 @@ func TestLeader_ReplicateIntentions(t *testing.T) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.Build = "1.6.0" c.OverrideInitialSerfTags = func(tags map[string]string) { diff --git a/agent/consul/leader_oss_test.go b/agent/consul/leader_oss_test.go new file mode 100644 index 000000000..6aab96f89 --- /dev/null +++ b/agent/consul/leader_oss_test.go @@ -0,0 +1,14 @@ +//go:build !consulent +// +build !consulent + +package consul + +import libserf "github.com/hashicorp/consul/lib/serf" + +func updateSerfTags(s *Server, key, value string) { + libserf.UpdateTag(s.serfLAN, key, value) + + if s.serfWAN != nil { + 
libserf.UpdateTag(s.serfWAN, key, value) + } +} diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 635999b33..dd748b370 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/consul/agent/structs" tokenStore "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" - libserf "github.com/hashicorp/consul/lib/serf" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" @@ -32,7 +31,7 @@ func TestLeader_RegisterMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -107,7 +106,7 @@ func TestLeader_FailedMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -172,7 +171,7 @@ func TestLeader_LeftMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -222,7 +221,7 @@ func TestLeader_ReapMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -287,7 +286,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + 
c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = true }) @@ -297,7 +296,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { dir2, s2 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -307,7 +306,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { dir3, s3 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -395,7 +394,7 @@ func TestLeader_ReapServer(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = true }) @@ -405,7 +404,7 @@ func TestLeader_ReapServer(t *testing.T) { dir2, s2 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -415,7 +414,7 @@ func TestLeader_ReapServer(t *testing.T) { dir3, s3 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -474,7 +473,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -527,7 +526,7 @@ func 
TestLeader_Reconcile(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -876,7 +875,7 @@ func TestLeader_ReapTombstones(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.TombstoneTTL = 50 * time.Millisecond c.TombstoneTTLGranularity = 10 * time.Millisecond @@ -1181,7 +1180,7 @@ func TestLeader_ACL_Initialization(t *testing.T) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = tt.master + c.ACLInitialManagementToken = tt.master } dir1, s1 := testServerWithConfig(t, conf) defer os.RemoveAll(dir1) @@ -1226,7 +1225,7 @@ func TestLeader_ACLUpgrade_IsStickyEvenIfSerfTagsRegress(t *testing.T) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -1770,14 +1769,6 @@ func TestDatacenterSupportsFederationStates(t *testing.T) { }) } -func updateSerfTags(s *Server, key, value string) { - libserf.UpdateTag(s.serfLAN, key, value) - - if s.serfWAN != nil { - libserf.UpdateTag(s.serfWAN, key, value) - } -} - func TestDatacenterSupportsIntentionsAsConfigEntries(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -2129,3 +2120,86 @@ func TestDatacenterSupportsIntentionsAsConfigEntries(t *testing.T) { ) }) } + +func TestLeader_EnableVirtualIPs(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + conf := func(c *Config) { + c.Bootstrap = false + c.BootstrapExpect = 3 + c.Datacenter = "dc1" + c.Build = "1.11.0" + } + dir1, s1 := testServerWithConfig(t, conf) + 
defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + dir2, s2 := testServerWithConfig(t, conf) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + dir3, s3 := testServerWithConfig(t, func(c *Config) { + conf(c) + c.Build = "1.10.0" + }) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + + // Try to join and wait for all servers to get promoted + joinLAN(t, s2, s1) + joinLAN(t, s3, s1) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Should have nothing stored. + state := s1.fsm.State() + _, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled) + require.NoError(t, err) + require.Nil(t, entry) + + // Register a connect-native service and make sure we don't have a virtual IP yet. + err = state.EnsureRegistration(10, &structs.RegisterRequest{ + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "api", + Connect: structs.ServiceConnect{ + Native: true, + }, + }, + }) + require.NoError(t, err) + + vip, err := state.VirtualIPForService(structs.NewServiceName("api", nil)) + require.NoError(t, err) + require.Equal(t, "", vip) + + // Leave s3 and wait for the version to get updated. + require.NoError(t, s3.Leave()) + retry.Run(t, func(r *retry.R) { + _, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled) + require.NoError(r, err) + require.NotNil(r, entry) + require.Equal(r, "true", entry.Value) + }) + + // Update the connect-native service - now there should be a virtual IP assigned. 
+ err = state.EnsureRegistration(20, &structs.RegisterRequest{ + Node: "foo", + Address: "127.0.0.2", + Service: &structs.NodeService{ + Service: "api", + Connect: structs.ServiceConnect{ + Native: true, + }, + }, + }) + require.NoError(t, err) + + vip, err = state.VirtualIPForService(structs.NewServiceName("api", nil)) + require.NoError(t, err) + require.Equal(t, "240.0.0.1", vip) +} diff --git a/agent/consul/merge.go b/agent/consul/merge.go index 16bc55ddc..306305881 100644 --- a/agent/consul/merge.go +++ b/agent/consul/merge.go @@ -2,6 +2,7 @@ package consul import ( "fmt" + "sync" "github.com/hashicorp/go-version" "github.com/hashicorp/serf/serf" @@ -14,16 +15,10 @@ import ( // ring. We check that the peers are in the same datacenter and abort the // merge if there is a mis-match. type lanMergeDelegate struct { - dc string - nodeID types.NodeID - nodeName string - segment string - - // TODO(partitions): use server and partition to reject gossip messages - // from nodes in the wrong partition depending upon the role the node is - // playing. For example servers will always be in the default partition, - // but all clients in all partitions should be aware of the servers so that - // general RPC routing works. + dc string + nodeID types.NodeID + nodeName string + segment string server bool partition string } @@ -81,9 +76,8 @@ func (md *lanMergeDelegate) NotifyMerge(members []*serf.Member) error { } } - if segment := m.Tags["segment"]; segment != md.segment { - return fmt.Errorf("Member '%s' part of wrong segment '%s' (expected '%s')", - m.Name, segment, md.segment) + if err := md.enterpriseNotifyMergeMember(m); err != nil { + return err } } return nil @@ -93,14 +87,41 @@ func (md *lanMergeDelegate) NotifyMerge(members []*serf.Member) error { // ring. We check that the peers are server nodes and abort the merge // otherwise. 
type wanMergeDelegate struct { + localDatacenter string + + federationDisabledLock sync.Mutex + federationDisabled bool +} + +// SetWANFederationDisabled selectively disables the wan pool from accepting +// non-local members. If the toggle changed the current value it returns true. +func (md *wanMergeDelegate) SetWANFederationDisabled(disabled bool) bool { + md.federationDisabledLock.Lock() + prior := md.federationDisabled + md.federationDisabled = disabled + md.federationDisabledLock.Unlock() + + return prior != disabled } func (md *wanMergeDelegate) NotifyMerge(members []*serf.Member) error { + // Deliberately hold this lock during the entire merge so calls to + // SetWANFederationDisabled returning immediately imply that the flag takes + // effect for all future merges. + md.federationDisabledLock.Lock() + defer md.federationDisabledLock.Unlock() + for _, m := range members { - ok, _ := metadata.IsConsulServer(*m) + ok, srv := metadata.IsConsulServer(*m) if !ok { return fmt.Errorf("Member '%s' is not a server", m.Name) } + + if md.federationDisabled { + if srv.Datacenter != md.localDatacenter { + return fmt.Errorf("Member '%s' part of wrong datacenter '%s'; WAN federation is disabled", m.Name, srv.Datacenter) + } + } } return nil } diff --git a/agent/consul/merge_oss.go b/agent/consul/merge_oss.go new file mode 100644 index 000000000..515bbbcd1 --- /dev/null +++ b/agent/consul/merge_oss.go @@ -0,0 +1,22 @@ +//go:build !consulent +// +build !consulent + +package consul + +import ( + "fmt" + + "github.com/hashicorp/serf/serf" +) + +func (md *lanMergeDelegate) enterpriseNotifyMergeMember(m *serf.Member) error { + if memberPartition := m.Tags["ap"]; memberPartition != "" { + return fmt.Errorf("Member '%s' part of partition '%s'; Partitions are a Consul Enterprise feature", + m.Name, memberPartition) + } + if segment := m.Tags["segment"]; segment != "" { + return fmt.Errorf("Member '%s' part of segment '%s'; Network Segments are a Consul Enterprise feature", + 
m.Name, segment) + } + return nil +} diff --git a/agent/consul/merge_oss_test.go b/agent/consul/merge_oss_test.go new file mode 100644 index 000000000..99333c7dd --- /dev/null +++ b/agent/consul/merge_oss_test.go @@ -0,0 +1,76 @@ +//go:build !consulent +// +build !consulent + +package consul + +import ( + "testing" + + "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/types" +) + +func TestMerge_OSS_LAN(t *testing.T) { + type testcase struct { + segment string + server bool + partition string + members []*serf.Member + expect string + } + + const thisNodeID = "ee954a2f-80de-4b34-8780-97b942a50a99" + + run := func(t *testing.T, tc testcase) { + delegate := &lanMergeDelegate{ + dc: "dc1", + nodeID: types.NodeID(thisNodeID), + nodeName: "node0", + segment: tc.segment, + server: tc.server, + partition: tc.partition, + } + + err := delegate.NotifyMerge(tc.members) + + if tc.expect == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expect) + } + } + + cases := map[string]testcase{ + "node in a segment": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + build: "0.7.5", + segment: "alpha", + }), + }, + expect: `Member 'node1' part of segment 'alpha'; Network Segments are a Consul Enterprise feature`, + }, + "node in a partition": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + build: "0.7.5", + partition: "part1", + }), + }, + expect: `Member 'node1' part of partition 'part1'; Partitions are a Consul Enterprise feature`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/agent/consul/merge_test.go b/agent/consul/merge_test.go index 91e86a124..1a8c57bd8 100644 --- a/agent/consul/merge_test.go +++ b/agent/consul/merge_test.go @@ -1,190 +1,264 @@ package consul import ( - "strings" "testing" - 
"github.com/hashicorp/consul/types" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/types" ) -func makeNode(dc, name, id string, server bool, build string) *serf.Member { - var role string - if server { - role = "consul" - } else { - role = "node" +func TestMerge_LAN(t *testing.T) { + type testcase struct { + members []*serf.Member + expect string } - return &serf.Member{ - Name: name, + const thisNodeID = "ee954a2f-80de-4b34-8780-97b942a50a99" + + run := func(t *testing.T, tc testcase) { + delegate := &lanMergeDelegate{ + dc: "dc1", + nodeID: types.NodeID(thisNodeID), + nodeName: "node0", + } + + err := delegate.NotifyMerge(tc.members) + + if tc.expect == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expect) + } + } + + cases := map[string]testcase{ + "client in the wrong datacenter": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc2", + name: "node1", + server: false, + build: "0.7.5", + }), + }, + expect: "wrong datacenter", + }, + "server in the wrong datacenter": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc2", + name: "node1", + server: true, + build: "0.7.5", + }), + }, + expect: "wrong datacenter", + }, + "node ID conflict with delegate's ID": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + id: thisNodeID, + server: true, + build: "0.7.5", + }), + }, + expect: "with this agent's ID", + }, + "cluster with existing conflicting node IDs": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + id: "6185913b-98d7-4441-bd8f-f7f7d854a4af", + server: true, + build: "0.8.5", + }), + makeTestNode(t, testMember{ + dc: "dc1", + name: "node2", + id: "6185913b-98d7-4441-bd8f-f7f7d854a4af", + server: true, + build: "0.9.0", + }), + }, + expect: "with member", + }, + "cluster 
with existing conflicting node IDs, but version is old enough to skip the check": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + id: "6185913b-98d7-4441-bd8f-f7f7d854a4af", + server: true, + build: "0.8.5", + }), + makeTestNode(t, testMember{ + dc: "dc1", + name: "node2", + id: "6185913b-98d7-4441-bd8f-f7f7d854a4af", + server: true, + build: "0.8.4", + }), + }, + expect: "with member", + }, + "good cluster": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + server: true, + build: "0.8.5", + }), + makeTestNode(t, testMember{ + dc: "dc1", + name: "node2", + server: true, + build: "0.8.5", + }), + }, + expect: "", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestMerge_WAN(t *testing.T) { + type testcase struct { + members []*serf.Member + expect string + setupFn func(t *testing.T, delegate *wanMergeDelegate) + } + + run := func(t *testing.T, tc testcase) { + delegate := &wanMergeDelegate{ + localDatacenter: "dc1", + } + if tc.setupFn != nil { + tc.setupFn(t, delegate) + } + err := delegate.NotifyMerge(tc.members) + if tc.expect == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expect) + } + } + + cases := map[string]testcase{ + "not a server": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc2", + name: "node1", + server: false, + build: "0.7.5", + }), + }, + expect: "not a server", + }, + "good cluster": { + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc2", + name: "node1", + server: true, + build: "0.7.5", + }), + makeTestNode(t, testMember{ + dc: "dc3", + name: "node2", + server: true, + build: "0.7.5", + }), + }, + }, + "federation disabled and local join allowed": { + setupFn: func(t *testing.T, delegate *wanMergeDelegate) { + delegate.SetWANFederationDisabled(true) + }, + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + 
name: "node1", + server: true, + build: "0.7.5", + }), + }, + }, + "federation disabled and remote join blocked": { + setupFn: func(t *testing.T, delegate *wanMergeDelegate) { + delegate.SetWANFederationDisabled(true) + }, + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc2", + name: "node1", + server: true, + build: "0.7.5", + }), + }, + expect: `WAN federation is disabled`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +type testMember struct { + dc string + name string + id string + server bool + build string + segment string + partition string +} + +func (tm testMember) role() string { + if tm.server { + return "consul" + } + return "node" +} + +func makeTestNode(t *testing.T, tm testMember) *serf.Member { + if tm.id == "" { + uuid, err := uuid.GenerateUUID() + require.NoError(t, err) + tm.id = uuid + } + m := &serf.Member{ + Name: tm.name, Tags: map[string]string{ - "role": role, - "dc": dc, - "id": id, + "role": tm.role(), + "dc": tm.dc, + "id": tm.id, "port": "8300", - "build": build, + "segment": tm.segment, + "build": tm.build, "vsn": "2", "vsn_max": "3", "vsn_min": "2", }, } -} - -func TestMerge_LAN(t *testing.T) { - t.Parallel() - cases := []struct { - members []*serf.Member - expect string - }{ - // Client in the wrong datacenter. - { - members: []*serf.Member{ - makeNode("dc2", - "node1", - "96430788-246f-4379-94ce-257f7429e340", - false, - "0.7.5"), - }, - expect: "wrong datacenter", - }, - // Server in the wrong datacenter. - { - members: []*serf.Member{ - makeNode("dc2", - "node1", - "96430788-246f-4379-94ce-257f7429e340", - true, - "0.7.5"), - }, - expect: "wrong datacenter", - }, - // Node ID conflict with delegate's ID. - { - members: []*serf.Member{ - makeNode("dc1", - "node1", - "ee954a2f-80de-4b34-8780-97b942a50a99", - true, - "0.7.5"), - }, - expect: "with this agent's ID", - }, - // Cluster with existing conflicting node IDs. 
- { - members: []*serf.Member{ - makeNode("dc1", - "node1", - "6185913b-98d7-4441-bd8f-f7f7d854a4af", - true, - "0.8.5"), - makeNode("dc1", - "node2", - "6185913b-98d7-4441-bd8f-f7f7d854a4af", - true, - "0.9.0"), - }, - expect: "with member", - }, - // Cluster with existing conflicting node IDs, but version is - // old enough to skip the check. - { - members: []*serf.Member{ - makeNode("dc1", - "node1", - "6185913b-98d7-4441-bd8f-f7f7d854a4af", - true, - "0.8.5"), - makeNode("dc1", - "node2", - "6185913b-98d7-4441-bd8f-f7f7d854a4af", - true, - "0.8.4"), - }, - expect: "with member", - }, - // Good cluster. - { - members: []*serf.Member{ - makeNode("dc1", - "node1", - "6185913b-98d7-4441-bd8f-f7f7d854a4af", - true, - "0.8.5"), - makeNode("dc1", - "node2", - "cda916bc-a357-4a19-b886-59419fcee50c", - true, - "0.8.5"), - }, - expect: "", - }, - } - - delegate := &lanMergeDelegate{ - dc: "dc1", - nodeID: types.NodeID("ee954a2f-80de-4b34-8780-97b942a50a99"), - nodeName: "node0", - segment: "", - } - for i, c := range cases { - if err := delegate.NotifyMerge(c.members); c.expect == "" { - if err != nil { - t.Fatalf("case %d: err: %v", i+1, err) - } - } else { - if err == nil || !strings.Contains(err.Error(), c.expect) { - t.Fatalf("case %d: err: %v", i+1, err) - } - } - } -} - -func TestMerge_WAN(t *testing.T) { - t.Parallel() - cases := []struct { - members []*serf.Member - expect string - }{ - // Not a server - { - members: []*serf.Member{ - makeNode("dc2", - "node1", - "96430788-246f-4379-94ce-257f7429e340", - false, - "0.7.5"), - }, - expect: "not a server", - }, - // Good cluster. 
- { - members: []*serf.Member{ - makeNode("dc2", - "node1", - "6185913b-98d7-4441-bd8f-f7f7d854a4af", - true, - "0.7.5"), - makeNode("dc3", - "node2", - "cda916bc-a357-4a19-b886-59419fcee50c", - true, - "0.7.5"), - }, - expect: "", - }, - } - - delegate := &wanMergeDelegate{} - for i, c := range cases { - if err := delegate.NotifyMerge(c.members); c.expect == "" { - if err != nil { - t.Fatalf("case %d: err: %v", i+1, err) - } - } else { - if err == nil || !strings.Contains(err.Error(), c.expect) { - t.Fatalf("case %d: err: %v", i+1, err) - } - } - } + if tm.partition != "" { + m.Tags["ap"] = tm.partition + } + return m } diff --git a/agent/consul/operator_autopilot_endpoint_test.go b/agent/consul/operator_autopilot_endpoint_test.go index d1a9e96b6..501f0f15d 100644 --- a/agent/consul/operator_autopilot_endpoint_test.go +++ b/agent/consul/operator_autopilot_endpoint_test.go @@ -54,7 +54,7 @@ func TestOperator_Autopilot_GetConfiguration_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.AutopilotConfig.CleanupDeadServers = false }) @@ -138,7 +138,7 @@ func TestOperator_Autopilot_SetConfiguration_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.AutopilotConfig.CleanupDeadServers = false }) diff --git a/agent/consul/operator_raft_endpoint_test.go b/agent/consul/operator_raft_endpoint_test.go index da5bcc121..1b944b3fc 100644 --- a/agent/consul/operator_raft_endpoint_test.go +++ b/agent/consul/operator_raft_endpoint_test.go @@ -72,7 +72,7 @@ func TestOperator_RaftGetConfiguration_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" 
c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -140,13 +140,10 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1") - ports := freeport.MustTake(1) - defer freeport.Return(ports) - // Try to remove a peer that's not there. arg := structs.RaftRemovePeerRequest{ Datacenter: "dc1", - Address: raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", ports[0])), + Address: raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))), } var reply struct{} err := msgpackrpc.CallWithCodec(codec, "Operator.RaftRemovePeerByAddress", &arg, &reply) @@ -202,7 +199,7 @@ func TestOperator_RaftRemovePeerByAddress_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -263,10 +260,7 @@ func TestOperator_RaftRemovePeerByID(t *testing.T) { // Add it manually to Raft. 
{ - ports := freeport.MustTake(1) - defer freeport.Return(ports) - - future := s1.raft.AddVoter(arg.ID, raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", ports[0])), 0, 0) + future := s1.raft.AddVoter(arg.ID, raft.ServerAddress(fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))), 0, 0) if err := future.Error(); err != nil { t.Fatalf("err: %v", err) } @@ -311,7 +305,7 @@ func TestOperator_RaftRemovePeerByID_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.RaftConfig.ProtocolVersion = 3 }) diff --git a/agent/consul/options_oss.go b/agent/consul/options_oss.go index 7e33e6cd7..0718d309f 100644 --- a/agent/consul/options_oss.go +++ b/agent/consul/options_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package consul diff --git a/agent/consul/prepared_query/walk_oss_test.go b/agent/consul/prepared_query/walk_oss_test.go index 16c9b2c57..0049a62cc 100644 --- a/agent/consul/prepared_query/walk_oss_test.go +++ b/agent/consul/prepared_query/walk_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package prepared_query diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 5df85ef60..7c6620239 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -299,7 +299,7 @@ func (p *PreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest, defer metrics.MeasureSince([]string{"prepared-query", "explain"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. 
- p.srv.setQueryMeta(&reply.QueryMeta) + p.srv.setQueryMeta(&reply.QueryMeta, args.Token) if args.RequireConsistent { if err := p.srv.consistentRead(); err != nil { return err @@ -346,7 +346,6 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, defer metrics.MeasureSince([]string{"prepared-query", "execute"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. - p.srv.setQueryMeta(&reply.QueryMeta) if args.RequireConsistent { if err := p.srv.consistentRead(); err != nil { return err @@ -374,7 +373,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, if query.Token != "" { token = query.Token } - if err := p.srv.filterACL(token, &reply.Nodes); err != nil { + if err := p.srv.filterACL(token, reply); err != nil { return err } @@ -383,6 +382,9 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, // might not be worth the code complexity and behavior differences, // though, since this is essentially a misconfiguration. + // We have to do this ourselves since we are not doing a blocking RPC. + p.srv.setQueryMeta(&reply.QueryMeta, token) + // Shuffle the results in case coordinates are not available if they // requested an RTT sort. reply.Nodes.Shuffle() @@ -481,7 +483,6 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe defer metrics.MeasureSince([]string{"prepared-query", "execute_remote"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. 
- p.srv.setQueryMeta(&reply.QueryMeta) if args.RequireConsistent { if err := p.srv.consistentRead(); err != nil { return err @@ -499,10 +500,13 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe if args.Query.Token != "" { token = args.Query.Token } - if err := p.srv.filterACL(token, &reply.Nodes); err != nil { + if err := p.srv.filterACL(token, reply); err != nil { return err } + // We have to do this ourselves since we are not doing a blocking RPC. + p.srv.setQueryMeta(&reply.QueryMeta, token) + // We don't bother trying to do an RTT sort here since we are by // definition in another DC. We just shuffle to make sure that we // balance the load across the results. diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index 33858726c..9d82fdfa2 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -200,7 +200,7 @@ func TestPreparedQuery_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -629,7 +629,7 @@ func TestPreparedQuery_ACLDeny_Catchall_Template(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -831,7 +831,7 @@ func TestPreparedQuery_Get(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1072,7 +1072,7 @@ func TestPreparedQuery_List(t *testing.T) { dir1, s1 := 
testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1167,6 +1167,31 @@ func TestPreparedQuery_List(t *testing.T) { } } + // Same for a token without access to the query. + { + token := createTokenWithPolicyName(t, codec, "deny-queries", ` + query_prefix "" { + policy = "deny" + } + `, "root") + + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var resp structs.IndexedPreparedQueries + if err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.List", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Queries) != 0 { + t.Fatalf("bad: %v", resp) + } + if !resp.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + } + // But a management token should work, and be able to see the captured // token. 
query.Query.Token = "le-token" @@ -1268,7 +1293,7 @@ func TestPreparedQuery_Explain(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1392,7 +1417,7 @@ func TestPreparedQuery_Execute(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1430,13 +1455,13 @@ func TestPreparedQuery_Execute(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) testrpc.WaitForLeader(t, s1.RPC, "dc2", testrpc.WithToken("root")) - execNoNodesToken := createTokenWithPolicyName(t, "no-nodes", codec1, `service_prefix "foo" { policy = "read" }`) + execNoNodesToken := createTokenWithPolicyName(t, codec1, "no-nodes", `service_prefix "foo" { policy = "read" }`, "root") rules := ` service_prefix "foo" { policy = "read" } node_prefix "" { policy = "read" } ` - execToken := createTokenWithPolicyName(t, "with-read", codec1, rules) - denyToken := createTokenWithPolicyName(t, "with-deny", codec1, `service_prefix "foo" { policy = "deny" }`) + execToken := createTokenWithPolicyName(t, codec1, "with-read", rules, "root") + denyToken := createTokenWithPolicyName(t, codec1, "with-deny", `service_prefix "foo" { policy = "deny" }`, "root") newSessionDC1 := func(t *testing.T) string { t.Helper() @@ -2124,6 +2149,7 @@ func TestPreparedQuery_Execute(t *testing.T) { require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) expectNodes(t, &query, &reply, 0) + require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("normal operation again with exec token", func(t *testing.T) { @@ 
-2246,6 +2272,20 @@ func TestPreparedQuery_Execute(t *testing.T) { expectFailoverNodes(t, &query, &reply, 0) }) + t.Run("nodes in response from dc2 are filtered by ACL token", func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execNoNodesToken}, + } + + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + + expectFailoverNodes(t, &query, &reply, 0) + require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + // Bake the exec token into the query. query.Query.Token = execToken require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) @@ -2659,7 +2699,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2669,7 +2709,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) { c.Datacenter = "dc2" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir2) diff --git a/agent/consul/rpc.go b/agent/consul/rpc.go index c8d733a28..9a93b562e 100644 --- a/agent/consul/rpc.go +++ b/agent/consul/rpc.go @@ -938,8 +938,6 @@ func (s *Server) blockingQuery(queryOpts structs.QueryOptionsCompat, queryMeta s RUN_QUERY: // Setup blocking loop - // Update the query metadata. - s.setQueryMeta(queryMeta) // Validate // If the read must be consistent we verify that we are still the leader. @@ -968,6 +966,10 @@ RUN_QUERY: // Execute the queryFn err := fn(ws, state) + + // Update the query metadata. 
+ s.setQueryMeta(queryMeta, queryOpts.GetToken()) + // Note we check queryOpts.MinQueryIndex is greater than zero to determine if // blocking was requested by client, NOT meta.Index since the state function // might return zero if something is not initialized and care wasn't taken to @@ -1001,7 +1003,9 @@ RUN_QUERY: } // setQueryMeta is used to populate the QueryMeta data for an RPC call -func (s *Server) setQueryMeta(m structs.QueryMetaCompat) { +// +// Note: This method must be called *after* filtering query results with ACLs. +func (s *Server) setQueryMeta(m structs.QueryMetaCompat, token string) { if s.IsLeader() { m.SetLastContact(0) m.SetKnownLeader(true) @@ -1009,6 +1013,7 @@ func (s *Server) setQueryMeta(m structs.QueryMetaCompat) { m.SetLastContact(time.Since(s.raft.LastContact())) m.SetKnownLeader(s.raft.Leader() != "") } + maskResultsFilteredByACLs(token, m) } // consistentRead is used to ensure we do not perform a stale @@ -1043,3 +1048,31 @@ func (s *Server) consistentRead() error { return structs.ErrNotReadyForConsistentReads } + +// maskResultsFilteredByACLs blanks out the ResultsFilteredByACLs flag if the +// request is unauthenticated, to limit information leaking. +// +// Endpoints that support bexpr filtering could be used in combination with +// this flag/header to discover the existence of resources to which the user +// does not have access, therefore we only expose it when the user presents +// a valid ACL token. This doesn't completely remove the risk (by nature the +// purpose of this flag is to let the user know there are resources they can +// not access) but it prevents completely unauthenticated users from doing so. +// +// Notes: +// +// * The definition of "unauthenticated" here is incomplete, as it doesn't +// account for the fact that operators can modify the anonymous token with +// custom policies, or set namespace default policies. 
As these scenarios + are less common and this flag is a best-effort UX improvement, we think + the trade-off for reduced complexity is acceptable. + // + // * This method assumes that the given token has already been validated (and + // will only check whether it is blank or not). It's a safe assumption because + // ResultsFilteredByACLs is only set to true when applying the already-resolved + token's policies. +func maskResultsFilteredByACLs(token string, meta structs.QueryMetaCompat) { + if token == "" { + meta.SetResultsFilteredByACLs(false) + } +} diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index f3b013bcd..bde9b4d9e 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -36,6 +36,7 @@ import ( "github.com/hashicorp/consul/agent/structs" tokenStore "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -369,6 +370,39 @@ func TestRPC_blockingQuery(t *testing.T) { t.Fatalf("bad: %d", calls) } } + + t.Run("ResultsFilteredByACLs is reset for unauthenticated calls", func(t *testing.T) { + opts := structs.QueryOptions{ + Token: "", + } + var meta structs.QueryMeta + fn := func(_ memdb.WatchSet, _ *state.Store) error { + meta.ResultsFilteredByACLs = true + return nil + } + + err := s.blockingQuery(&opts, &meta, fn) + require.NoError(err) + require.False(meta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be reset for unauthenticated calls") + }) + + t.Run("ResultsFilteredByACLs is honored for authenticated calls", func(t *testing.T) { + token, err := lib.GenerateUUID(nil) + require.NoError(err) + + opts := structs.QueryOptions{ + Token: token, + } + var meta structs.QueryMeta + fn := func(_ memdb.WatchSet, _ *state.Store) error { + meta.ResultsFilteredByACLs = true + return nil + } + + err = s.blockingQuery(&opts, &meta, fn) + 
require.NoError(err) + require.True(meta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be honored for authenticated calls") + }) } func TestRPC_ReadyForConsistentReads(t *testing.T) { @@ -848,7 +882,7 @@ func TestRPC_LocalTokenStrippedOnForward(t *testing.T) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLResolverSettings.ACLDefaultPolicy = "deny" - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -976,7 +1010,7 @@ func TestRPC_LocalTokenStrippedOnForward_GRPC(t *testing.T) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLResolverSettings.ACLDefaultPolicy = "deny" - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.RPCConfig.EnableStreaming = true }) s1.tokens.UpdateAgentToken("root", tokenStore.TokenSourceConfig) diff --git a/agent/consul/segment_oss.go b/agent/consul/segment_oss.go index 1398087a4..034e79c54 100644 --- a/agent/consul/segment_oss.go +++ b/agent/consul/segment_oss.go @@ -1,14 +1,12 @@ +//go:build !consulent // +build !consulent package consul import ( "net" - "time" - "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" - "github.com/hashicorp/serf/serf" "github.com/hashicorp/consul/agent/structs" ) @@ -37,7 +35,7 @@ func (s *Server) setupSegmentRPC() (map[string]net.Listener, error) { // setupSegments returns an error if any segments are defined since the OSS // version of Consul doesn't support them. -func (s *Server) setupSegments(config *Config, port int, rpcListeners map[string]net.Listener) error { +func (s *Server) setupSegments(config *Config, rpcListeners map[string]net.Listener) error { if len(config.Segments) > 0 { return structs.ErrSegmentsNotSupported } @@ -48,28 +46,3 @@ func (s *Server) setupSegments(config *Config, port int, rpcListeners map[string // floodSegments is a NOP in the OSS version of Consul. 
func (s *Server) floodSegments(config *Config) { } - -func getSerfMemberEnterpriseMeta(member serf.Member) *structs.EnterpriseMeta { - return structs.NodeEnterpriseMetaInDefaultPartition() -} - -// reconcile is used to reconcile the differences between Serf membership and -// what is reflected in our strongly consistent store. Mainly we need to ensure -// all live nodes are registered, all failed nodes are marked as such, and all -// left nodes are deregistered. -func (s *Server) reconcile() (err error) { - defer metrics.MeasureSince([]string{"leader", "reconcile"}, time.Now()) - - members := s.serfLAN.Members() - knownMembers := make(map[string]struct{}) - for _, member := range members { - if err := s.reconcileMember(member); err != nil { - return err - } - knownMembers[member.Name] = struct{}{} - } - - // Reconcile any members that have been reaped while we were not the - // leader. - return s.reconcileReaped(knownMembers, nil) -} diff --git a/agent/consul/server.go b/agent/consul/server.go index 9c5a3876e..57ac4eb62 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -18,6 +18,7 @@ import ( "time" "github.com/hashicorp/go-version" + "go.etcd.io/bbolt" "github.com/armon/go-metrics" connlimit "github.com/hashicorp/go-connlimit" @@ -25,7 +26,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" - raftboltdb "github.com/hashicorp/raft-boltdb" + raftboltdb "github.com/hashicorp/raft-boltdb/v2" "github.com/hashicorp/serf/serf" "golang.org/x/time/rate" "google.golang.org/grpc" @@ -121,6 +122,11 @@ var ( ErrWANFederationDisabled = fmt.Errorf("WAN Federation is disabled") ) +const ( + PoolKindPartition = "partition" + PoolKindSegment = "segment" +) + // Server is Consul server which manages the service discovery, // health checking, DC forwarding, Raft, and multiple Serf pools. 
type Server struct { @@ -183,6 +189,12 @@ type Server struct { // serf cluster that spans datacenters eventChWAN chan serf.Event + // wanMembershipNotifyCh is used to receive notifications that the the + // serfWAN wan pool may have changed. + // + // If this is nil, notification is skipped. + wanMembershipNotifyCh chan struct{} + // fsm is the state machine used with Raft to provide // strong consistency. fsm *fsm.FSM @@ -248,14 +260,19 @@ type Server struct { // serfLAN is the Serf cluster maintained inside the DC // which contains all the DC nodes + // + // - If Network Segments are active, this only contains members in the + // default segment. + // + // - If Admin Partitions are active, this only contains members in the + // default partition. + // serfLAN *serf.Serf - // segmentLAN maps segment names to their Serf cluster - segmentLAN map[string]*serf.Serf - // serfWAN is the Serf cluster maintained between DC's // which SHOULD only consist of Consul servers serfWAN *serf.Serf + serfWANConfig *serf.Config memberlistTransportWAN wanfed.IngestionAwareTransport gatewayLocator *GatewayLocator @@ -362,7 +379,6 @@ func NewServer(config *Config, flat Deps) (*Server, error) { insecureRPCServer: rpc.NewServer(), tlsConfigurator: flat.TLSConfigurator, reassertLeaderCh: make(chan chan error), - segmentLAN: make(map[string]*serf.Serf, len(config.Segments)), sessionTimers: NewSessionTimers(), tombstoneGC: gc, serverLookup: NewServerLookup(), @@ -464,7 +480,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) { return nil, fmt.Errorf("Failed to start Raft: %v", err) } - s.caManager = NewCAManager(&caDelegateWithState{s}, s.leaderRoutineManager, s.logger.ResetNamed("connect.ca"), s.config) + s.caManager = NewCAManager(&caDelegateWithState{Server: s}, s.leaderRoutineManager, s.logger.ResetNamed("connect.ca"), s.config) if s.config.ConnectEnabled && (s.config.AutoEncryptAllowTLS || s.config.AutoConfigAuthzEnabled) { go 
s.connectCARootsMonitor(&lib.StopChannelContext{StopCh: s.shutdownCh}) } @@ -483,10 +499,14 @@ func NewServer(config *Config, flat Deps) (*Server, error) { // a little gross to be reading the updated config. // Initialize the WAN Serf if enabled - serfBindPortWAN := -1 if config.SerfWANConfig != nil { - serfBindPortWAN = config.SerfWANConfig.MemberlistConfig.BindPort - s.serfWAN, err = s.setupSerf(config.SerfWANConfig, s.eventChWAN, serfWANSnapshot, true, serfBindPortWAN, "", s.Listener) + s.serfWAN, s.serfWANConfig, err = s.setupSerf(setupSerfOptions{ + Config: config.SerfWANConfig, + EventCh: s.eventChWAN, + SnapshotPath: serfWANSnapshot, + WAN: true, + Listener: s.Listener, + }) if err != nil { s.Shutdown() return nil, fmt.Errorf("Failed to start WAN Serf: %v", err) @@ -497,6 +517,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) { s.memberlistTransportWAN = config.SerfWANConfig.MemberlistConfig.Transport.(wanfed.IngestionAwareTransport) // See big comment above why we are doing this. + serfBindPortWAN := config.SerfWANConfig.MemberlistConfig.BindPort if serfBindPortWAN == 0 { serfBindPortWAN = config.SerfWANConfig.MemberlistConfig.BindPort if serfBindPortWAN == 0 { @@ -508,14 +529,13 @@ func NewServer(config *Config, flat Deps) (*Server, error) { // Initialize the LAN segments before the default LAN Serf so we have // updated port information to publish there. - if err := s.setupSegments(config, serfBindPortWAN, segmentListeners); err != nil { + if err := s.setupSegments(config, segmentListeners); err != nil { s.Shutdown() return nil, fmt.Errorf("Failed to setup network segments: %v", err) } // Initialize the LAN Serf for the default network segment. 
- s.serfLAN, err = s.setupSerf(config.SerfLANConfig, s.eventChLAN, serfLANSnapshot, false, serfBindPortWAN, "", s.Listener) - if err != nil { + if err := s.setupSerfLAN(config); err != nil { s.Shutdown() return nil, fmt.Errorf("Failed to start LAN Serf: %v", err) } @@ -535,7 +555,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) { s.Shutdown() return nil, fmt.Errorf("Failed to add WAN serf route: %v", err) } - go router.HandleSerfEvents(s.logger, s.router, types.AreaWAN, s.serfWAN.ShutdownCh(), s.eventChWAN) + go router.HandleSerfEvents(s.logger, s.router, types.AreaWAN, s.serfWAN.ShutdownCh(), s.eventChWAN, s.wanMembershipNotifyCh) // Fire up the LAN <-> WAN join flooder. addrFn := func(s *metadata.Server) (string, error) { @@ -717,13 +737,21 @@ func (s *Server) setupRaft() error { } // Create the backend raft store for logs and stable storage. - store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db")) + store, err := raftboltdb.New(raftboltdb.Options{ + BoltOptions: &bbolt.Options{ + NoFreelistSync: s.config.RaftBoltDBConfig.NoFreelistSync, + }, + Path: filepath.Join(path, "raft.db"), + }) if err != nil { return err } s.raftStore = store stable = store + // start publishing boltdb metrics + go store.RunMetrics(&lib.StopChannelContext{StopCh: s.shutdownCh}, 0) + // Wrap the store in a LogCache to improve performance. cacheStore, err := raft.NewLogCache(raftLogCacheSize, store) if err != nil { @@ -926,13 +954,7 @@ func (s *Server) Shutdown() error { s.leaderRoutineManager.StopAll() } - if s.serfLAN != nil { - s.serfLAN.Shutdown() - } - - for _, segment := range s.segmentLAN { - segment.Shutdown() - } + s.shutdownSerfLAN() if s.serfWAN != nil { s.serfWAN.Shutdown() @@ -942,6 +964,8 @@ func (s *Server) Shutdown() error { } s.router.Shutdown() + // TODO: actually shutdown areas? 
+ if s.raft != nil { s.raftTransport.Close() s.raftLayer.Close() @@ -1100,13 +1124,6 @@ func (s *Server) Leave() error { return nil } -// JoinLAN is used to have Consul join the inner-DC pool The target address -// should be another node inside the DC listening on the Serf LAN address -func (s *Server) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) { - // TODO(partitions): handle the different partitions - return s.serfLAN.Join(addrs, true) -} - // JoinWAN is used to have Consul join the cross-WAN Consul ring // The target address should be another node listening on the // Serf WAN address @@ -1114,6 +1131,11 @@ func (s *Server) JoinWAN(addrs []string) (int, error) { if s.serfWAN == nil { return 0, ErrWANFederationDisabled } + + if err := s.enterpriseValidateJoinWAN(); err != nil { + return 0, err + } + return s.serfWAN.Join(addrs, true) } @@ -1157,7 +1179,9 @@ func (s *Server) AgentLocalMember() serf.Member { return s.serfLAN.LocalMember() } -// LANMembersInAgentPartition is used to return the members of the LAN cluster +// LANMembersInAgentPartition returns the LAN members for this agent's +// canonical serf pool. For clients this is the only pool that exists. For +// servers it's the pool in the default segment and the default partition. func (s *Server) LANMembersInAgentPartition() []serf.Member { return s.serfLAN.Members() } @@ -1172,7 +1196,6 @@ func (s *Server) WANMembers() []serf.Member { // RemoveFailedNode is used to remove a failed node from the cluster. 
func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *structs.EnterpriseMeta) error { - // TODO(partitions): handle the different partitions var removeFn func(*serf.Serf, string) error if prune { removeFn = (*serf.Serf).RemoveFailedNodePrune @@ -1180,10 +1203,6 @@ func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *structs.Ente removeFn = (*serf.Serf).RemoveFailedNode } - if err := removeFn(s.serfLAN, node); err != nil { - return err - } - wanNode := node // The Serf WAN pool stores members as node.datacenter @@ -1191,13 +1210,20 @@ func (s *Server) RemoveFailedNode(node string, prune bool, entMeta *structs.Ente if !strings.HasSuffix(node, "."+s.config.Datacenter) { wanNode = node + "." + s.config.Datacenter } - if s.serfWAN != nil { - if err := removeFn(s.serfWAN, wanNode); err != nil { - return err - } + + return s.removeFailedNode(removeFn, node, wanNode, entMeta) +} + +// RemoveFailedNodeWAN is used to remove a failed node from the WAN cluster. +func (s *Server) RemoveFailedNodeWAN(wanNode string, prune bool, entMeta *structs.EnterpriseMeta) error { + var removeFn func(*serf.Serf, string) error + if prune { + removeFn = (*serf.Serf).RemoveFailedNodePrune + } else { + removeFn = (*serf.Serf).RemoveFailedNode } - return s.removeFailedNodeEnterprise(removeFn, node, wanNode) + return s.removeFailedNode(removeFn, "", wanNode, entMeta) } // IsLeader checks if this server is the cluster leader @@ -1213,6 +1239,7 @@ func (s *Server) LeaderLastContact() time.Time { // KeyManagerLAN returns the LAN Serf keyring manager func (s *Server) KeyManagerLAN() *serf.KeyManager { + // NOTE: The serfLAN keymanager is shared by all partitions. 
return s.serfLAN.KeyManager() } @@ -1221,15 +1248,8 @@ func (s *Server) KeyManagerWAN() *serf.KeyManager { return s.serfWAN.KeyManager() } -// LANSegments returns a map of LAN segments by name -func (s *Server) LANSegments() map[string]*serf.Serf { - segments := make(map[string]*serf.Serf, len(s.segmentLAN)+1) - segments[""] = s.serfLAN - for name, segment := range s.segmentLAN { - segments[name] = segment - } - - return segments +func (s *Server) AgentEnterpriseMeta() *structs.EnterpriseMeta { + return s.config.AgentEnterpriseMeta() } // inmemCodec is used to do an RPC call without going over a network @@ -1379,10 +1399,25 @@ func (s *Server) Stats() map[string]map[string]string { stats["serf_wan"] = s.serfWAN.Stats() } + s.addEnterpriseStats(stats) + return stats } -// GetLANCoordinate returns the coordinate of the server in the LAN gossip pool. +// GetLANCoordinate returns the coordinate of the node in the LAN gossip +// pool. +// +// - Clients return a single coordinate for the single gossip pool they are +// in (default, segment, or partition). +// +// - Servers return one coordinate for their canonical gossip pool (i.e. +// default partition/segment) and one per segment they are also ancillary +// members of. +// +// NOTE: servers do not emit coordinates for partitioned gossip pools they +// are ancillary members of. +// +// NOTE: This assumes coordinates are enabled, so check that before calling. 
func (s *Server) GetLANCoordinate() (lib.CoordinateSet, error) { lan, err := s.serfLAN.GetCoordinate() if err != nil { @@ -1390,16 +1425,17 @@ func (s *Server) GetLANCoordinate() (lib.CoordinateSet, error) { } cs := lib.CoordinateSet{"": lan} - for name, segment := range s.segmentLAN { - c, err := segment.GetCoordinate() - if err != nil { - return nil, err - } - cs[name] = c + if err := s.addEnterpriseLANCoordinates(cs); err != nil { + return nil, err } + return cs, nil } +func (s *Server) agentSegmentName() string { + return s.config.Segment +} + // ReloadConfig is used to have the Server do an online reload of // relevant configuration information func (s *Server) ReloadConfig(config ReloadableConfig) error { diff --git a/agent/consul/server_connect.go b/agent/consul/server_connect.go index 09453a5ee..5010eda7f 100644 --- a/agent/consul/server_connect.go +++ b/agent/consul/server_connect.go @@ -16,21 +16,22 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind if err != nil { return nil, err } + if config == nil || config.ClusterID == "" { + return nil, fmt.Errorf("CA has not finished initializing") + } indexedRoots := &structs.IndexedCARoots{} - if config != nil { - // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(config) - if signingID == nil { - // If CA is bootstrapped at all then this should never happen but be - // defensive. - return nil, fmt.Errorf("no cluster trust domain setup") - } - - indexedRoots.TrustDomain = signingID.Host() + // Build TrustDomain based on the ClusterID stored. + signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) + if signingID == nil { + // If CA is bootstrapped at all then this should never happen but be + // defensive. 
+ return nil, fmt.Errorf("no cluster trust domain setup") } + indexedRoots.TrustDomain = signingID.Host() + indexedRoots.Index, indexedRoots.Roots = index, roots if indexedRoots.Roots == nil { indexedRoots.Roots = make(structs.CARoots, 0) diff --git a/agent/consul/server_oss.go b/agent/consul/server_oss.go index ad5a30eb0..7d4830d1b 100644 --- a/agent/consul/server_oss.go +++ b/agent/consul/server_oss.go @@ -4,18 +4,76 @@ package consul import ( + "fmt" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/serf/coordinate" "github.com/hashicorp/serf/serf" "google.golang.org/grpc" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/lib" ) -func (s *Server) removeFailedNodeEnterprise(remove func(*serf.Serf, string) error, node, wanNode string) error { - // nothing to do for oss - return nil +func (s *Server) registerEnterpriseGRPCServices(deps Deps, srv *grpc.Server) {} + +func (s *Server) enterpriseValidateJoinWAN() error { + return nil // no-op } -func (s *Server) registerEnterpriseGRPCServices(deps Deps, srv *grpc.Server) {} +// JoinLAN is used to have Consul join the inner-DC pool The target address +// should be another node inside the DC listening on the Serf LAN address +func (s *Server) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) { + return s.serfLAN.Join(addrs, true) +} + +// removeFailedNode is used to remove a failed node from the cluster +// +// if node is empty, just remove wanNode from the WAN +func (s *Server) removeFailedNode( + removeFn func(*serf.Serf, string) error, + node, wanNode string, + entMeta *structs.EnterpriseMeta, +) error { + maybeRemove := func(s *serf.Serf, node string) (bool, error) { + if !isSerfMember(s, node) { + return false, nil + } + return true, removeFn(s, node) + } + + foundAny := false + + var merr error + + if node != "" { + if found, err := maybeRemove(s.serfLAN, node); err != nil { + merr = multierror.Append(merr, 
fmt.Errorf("could not remove failed node from LAN: %w", err)) + } else if found { + foundAny = true + } + } + + if s.serfWAN != nil { + if found, err := maybeRemove(s.serfWAN, wanNode); err != nil { + merr = multierror.Append(merr, fmt.Errorf("could not remove failed node from WAN: %w", err)) + } else if found { + foundAny = true + } + } + + if merr != nil { + return merr + } + + if !foundAny { + return fmt.Errorf("agent: No node found with name '%s'", node) + } + + return nil +} // lanPoolAllMembers only returns our own segment or partition's members, because // OSS servers can't be in multiple segments or partitions. @@ -39,3 +97,62 @@ func (s *Server) LANMembers(filter LANMemberFilter) ([]serf.Member, error) { } return s.LANMembersInAgentPartition(), nil } + +func (s *Server) GetMatchingLANCoordinate(_, _ string) (*coordinate.Coordinate, error) { + return s.serfLAN.GetCoordinate() +} + +func (s *Server) addEnterpriseLANCoordinates(cs lib.CoordinateSet) error { + return nil +} + +func (s *Server) LANSendUserEvent(name string, payload []byte, coalesce bool) error { + err := s.serfLAN.UserEvent(name, payload, coalesce) + if err != nil { + return fmt.Errorf("error broadcasting event: %w", err) + } + return nil +} + +func (s *Server) DoWithLANSerfs( + fn func(name, poolKind string, pool *serf.Serf) error, + errorFn func(name, poolKind string, err error) error, +) error { + if errorFn == nil { + errorFn = func(_, _ string, err error) error { return err } + } + err := fn("", "", s.serfLAN) + if err != nil { + return errorFn("", "", err) + } + return nil +} + +// reconcile is used to reconcile the differences between Serf membership and +// what is reflected in our strongly consistent store. Mainly we need to ensure +// all live nodes are registered, all failed nodes are marked as such, and all +// left nodes are deregistered. 
+func (s *Server) reconcile() (err error) { + defer metrics.MeasureSince([]string{"leader", "reconcile"}, time.Now()) + + members := s.serfLAN.Members() + knownMembers := make(map[string]struct{}) + for _, member := range members { + if err := s.reconcileMember(member); err != nil { + return err + } + knownMembers[member.Name] = struct{}{} + } + + // Reconcile any members that have been reaped while we were not the + // leader. + return s.reconcileReaped(knownMembers, nil) +} + +func (s *Server) addEnterpriseStats(stats map[string]map[string]string) { + // no-op +} + +func getSerfMemberEnterpriseMeta(member serf.Member) *structs.EnterpriseMeta { + return structs.NodeEnterpriseMetaInDefaultPartition() +} diff --git a/agent/consul/server_serf.go b/agent/consul/server_serf.go index 1950c6c32..44c3f857a 100644 --- a/agent/consul/server_serf.go +++ b/agent/consul/server_serf.go @@ -1,6 +1,7 @@ package consul import ( + "errors" "fmt" "net" "path/filepath" @@ -32,29 +33,75 @@ const ( maxPeerRetries = 6 ) +type setupSerfOptions struct { + Config *serf.Config + EventCh chan serf.Event + SnapshotPath string + Listener net.Listener + + // WAN only + WAN bool + + // LAN only + Segment string + Partition string +} + // setupSerf is used to setup and initialize a Serf -func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, wan bool, wanPort int, - segment string, listener net.Listener) (*serf.Serf, error) { +func (s *Server) setupSerf(opts setupSerfOptions) (*serf.Serf, *serf.Config, error) { + conf, err := s.setupSerfConfig(opts) + if err != nil { + return nil, nil, err + } + + cluster, err := serf.Create(conf) + if err != nil { + return nil, nil, err + } + + return cluster, conf, nil +} + +func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) { + if opts.Config == nil { + return nil, errors.New("serf config is a required field") + } + if opts.Listener == nil { + return nil, errors.New("listener is a required field") + } + if 
opts.WAN { + if opts.Segment != "" { + return nil, errors.New("cannot configure segments on the WAN serf pool") + } + if opts.Partition != "" { + return nil, errors.New("cannot configure partitions on the WAN serf pool") + } + } + + conf := opts.Config conf.Init() - if wan { + if opts.WAN { conf.NodeName = fmt.Sprintf("%s.%s", s.config.NodeName, s.config.Datacenter) } else { conf.NodeName = s.config.NodeName - if wanPort > 0 { - conf.Tags["wan_join_port"] = fmt.Sprintf("%d", wanPort) + if s.config.SerfWANConfig != nil { + serfBindPortWAN := s.config.SerfWANConfig.MemberlistConfig.BindPort + if serfBindPortWAN > 0 { + conf.Tags["wan_join_port"] = fmt.Sprintf("%d", serfBindPortWAN) + } } } conf.Tags["role"] = "consul" conf.Tags["dc"] = s.config.Datacenter - conf.Tags["segment"] = segment + conf.Tags["segment"] = opts.Segment conf.Tags["id"] = string(s.config.NodeID) conf.Tags["vsn"] = fmt.Sprintf("%d", s.config.ProtocolVersion) conf.Tags["vsn_min"] = fmt.Sprintf("%d", ProtocolVersionMin) conf.Tags["vsn_max"] = fmt.Sprintf("%d", ProtocolVersionMax) conf.Tags["raft_vsn"] = fmt.Sprintf("%d", s.config.RaftConfig.ProtocolVersion) conf.Tags["build"] = s.config.Build - addr := listener.Addr().(*net.TCPAddr) + addr := opts.Listener.Addr().(*net.TCPAddr) conf.Tags["port"] = fmt.Sprintf("%d", addr.Port) if s.config.Bootstrap { conf.Tags["bootstrap"] = "1" @@ -87,7 +134,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w conf.Tags["ft_si"] = "1" var subLoggerName string - if wan { + if opts.WAN { subLoggerName = logging.WAN } else { subLoggerName = logging.LAN @@ -107,22 +154,25 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w conf.MemberlistConfig.Logger = memberlistLogger conf.Logger = serfLogger - conf.EventCh = ch + conf.EventCh = opts.EventCh conf.ProtocolVersion = protocolVersionMap[s.config.ProtocolVersion] conf.RejoinAfterLeave = s.config.RejoinAfterLeave - if wan { - conf.Merge = &wanMergeDelegate{} + 
if opts.WAN { + conf.Merge = &wanMergeDelegate{ + localDatacenter: s.config.Datacenter, + } } else { conf.Merge = &lanMergeDelegate{ - dc: s.config.Datacenter, - nodeID: s.config.NodeID, - nodeName: s.config.NodeName, - segment: segment, - server: true, + dc: s.config.Datacenter, + nodeID: s.config.NodeID, + nodeName: s.config.NodeName, + segment: opts.Segment, + partition: opts.Partition, + server: true, } } - if wan { + if opts.WAN { nt, err := memberlist.NewNetTransport(&memberlist.NetTransportConfig{ BindAddrs: []string{conf.MemberlistConfig.BindAddr}, BindPort: conf.MemberlistConfig.BindPort, @@ -154,7 +204,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w // node which is rather unexpected. conf.EnableNameConflictResolution = false - if wan && s.config.ConnectMeshGatewayWANFederationEnabled { + if opts.WAN && s.config.ConnectMeshGatewayWANFederationEnabled { conf.MemberlistConfig.RequireNodeNames = true conf.MemberlistConfig.DisableTcpPingsForNode = func(nodeName string) bool { _, dc, err := wanfed.SplitNodeName(nodeName) @@ -169,7 +219,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w } if !s.config.DevMode { - conf.SnapshotPath = filepath.Join(s.config.DataDir, path) + conf.SnapshotPath = filepath.Join(s.config.DataDir, opts.SnapshotPath) } if err := lib.EnsurePath(conf.SnapshotPath, false); err != nil { return nil, err @@ -183,7 +233,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w s.config.OverrideInitialSerfTags(conf.Tags) } - return serf.Create(conf) + return conf, nil } // userEventName computes the name of a user event diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 84cb868b3..1f8bc4b0e 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -1,7 +1,6 @@ package consul import ( - "bytes" "crypto/x509" "fmt" "net" @@ -32,7 +31,6 @@ import ( "github.com/hashicorp/consul/tlsutil" 
"github.com/hashicorp/consul/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -76,7 +74,7 @@ func testServerACLConfig(cb func(*Config)) func(*Config) { return func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" if cb != nil { @@ -117,11 +115,7 @@ func testServerConfig(t *testing.T) (string, *Config) { dir := testutil.TempDir(t, "consul") config := DefaultConfig() - ports := freeport.MustTake(3) - t.Cleanup(func() { - freeport.Return(ports) - }) - + ports := freeport.GetN(t, 3) config.NodeName = uniqueNodeName(t.Name()) config.Bootstrap = true config.Datacenter = "dc1" @@ -389,35 +383,38 @@ func TestServer_JoinLAN(t *testing.T) { }) } -// TestServer_JoinLAN_SerfAllowedCIDRs test that IPs might be blocked -// with Serf. -// To run properly, this test requires to be able to bind and have access -// on 127.0.1.1 which is the case for most Linux machines and Windows, -// so Unit test will run in the CI. -// To run it on Mac OS, please run this commandd first, otherwise the -// test will be skipped: `sudo ifconfig lo0 alias 127.0.1.1 up` +// TestServer_JoinLAN_SerfAllowedCIDRs test that IPs might be blocked with +// Serf. +// +// To run properly, this test requires to be able to bind and have access on +// 127.0.1.1 which is the case for most Linux machines and Windows, so Unit +// test will run in the CI. 
+// +// To run it on Mac OS, please run this command first, otherwise the test will +// be skipped: `sudo ifconfig lo0 alias 127.0.1.1 up` func TestServer_JoinLAN_SerfAllowedCIDRs(t *testing.T) { t.Parallel() + + const targetAddr = "127.0.1.1" + + skipIfCannotBindToIP(t, targetAddr) + dir1, s1 := testServerWithConfig(t, func(c *Config) { c.BootstrapExpect = 1 lan, err := memberlist.ParseCIDRs([]string{"127.0.0.1/32"}) - assert.NoError(t, err) + require.NoError(t, err) c.SerfLANConfig.MemberlistConfig.CIDRsAllowed = lan wan, err := memberlist.ParseCIDRs([]string{"127.0.0.0/24", "::1/128"}) - assert.NoError(t, err) + require.NoError(t, err) c.SerfWANConfig.MemberlistConfig.CIDRsAllowed = wan }) defer os.RemoveAll(dir1) defer s1.Shutdown() - targetAddr := "127.0.1.1" - dir2, a2, err := testClientWithConfigWithErr(t, func(c *Config) { + dir2, a2 := testClientWithConfig(t, func(c *Config) { c.SerfLANConfig.MemberlistConfig.BindAddr = targetAddr }) defer os.RemoveAll(dir2) - if err != nil { - t.Skipf("Cannot bind on %s, to run on Mac OS: `sudo ifconfig lo0 alias 127.0.1.1 up`", targetAddr) - } defer a2.Shutdown() dir3, rs3 := testServerWithConfig(t, func(c *Config) { @@ -451,6 +448,76 @@ func TestServer_JoinLAN_SerfAllowedCIDRs(t *testing.T) { }) } +// TestServer_JoinWAN_SerfAllowedCIDRs test that IPs might be +// blocked with Serf. +// +// To run properly, this test requires to be able to bind and have access on +// 127.0.1.1 which is the case for most Linux machines and Windows, so Unit +// test will run in the CI. 
+// +// To run it on Mac OS, please run this command first, otherwise the test will +// be skipped: `sudo ifconfig lo0 alias 127.0.1.1 up` +func TestServer_JoinWAN_SerfAllowedCIDRs(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + const targetAddr = "127.0.1.1" + + skipIfCannotBindToIP(t, targetAddr) + + wanCIDRs, err := memberlist.ParseCIDRs([]string{"127.0.0.1/32"}) + require.NoError(t, err) + + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.Bootstrap = true + c.BootstrapExpect = 1 + c.Datacenter = "dc1" + c.SerfWANConfig.MemberlistConfig.CIDRsAllowed = wanCIDRs + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + waitForLeaderEstablishment(t, s1) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + dir2, s2 := testServerWithConfig(t, func(c *Config) { + c.Bootstrap = true + c.BootstrapExpect = 1 + c.PrimaryDatacenter = "dc1" + c.Datacenter = "dc2" + c.SerfWANConfig.MemberlistConfig.BindAddr = targetAddr + }) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + waitForLeaderEstablishment(t, s2) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Joining should be fine + joinWANWithNoMembershipChecks(t, s2, s1) + + // But membership is blocked if you go and take a peek on the server. 
+ t.Run("LAN membership should only show each other", func(t *testing.T) { + require.Len(t, s1.LANMembersInAgentPartition(), 1) + require.Len(t, s2.LANMembersInAgentPartition(), 1) + }) + t.Run("WAN membership in the primary should not show the secondary", func(t *testing.T) { + require.Len(t, s1.WANMembers(), 1) + }) + t.Run("WAN membership in the secondary can show the primary", func(t *testing.T) { + require.Len(t, s2.WANMembers(), 2) + }) +} + +func skipIfCannotBindToIP(t *testing.T, ip string) { + l, err := net.Listen("tcp", net.JoinHostPort(ip, "0")) + if err != nil { + t.Skipf("Cannot bind on %s, to run on Mac OS: `sudo ifconfig lo0 alias %s up`", ip, ip) + } + l.Close() +} + func TestServer_LANReap(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -649,9 +716,8 @@ func TestServer_JoinWAN_viaMeshGateway(t *testing.T) { t.Parallel() - gwPort := freeport.MustTake(1) - defer freeport.Return(gwPort) - gwAddr := ipaddr.FormatAddressPort("127.0.0.1", gwPort[0]) + port := freeport.GetOne(t) + gwAddr := ipaddr.FormatAddressPort("127.0.0.1", port) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.TLSConfig.Domain = "consul" @@ -737,7 +803,7 @@ func TestServer_JoinWAN_viaMeshGateway(t *testing.T) { ID: "mesh-gateway", Service: "mesh-gateway", Meta: map[string]string{structs.MetaWANFederationKey: "1"}, - Port: gwPort[0], + Port: port, }, } @@ -792,7 +858,7 @@ func TestServer_JoinWAN_viaMeshGateway(t *testing.T) { ID: "mesh-gateway", Service: "mesh-gateway", Meta: map[string]string{structs.MetaWANFederationKey: "1"}, - Port: gwPort[0], + Port: port, }, } @@ -809,7 +875,7 @@ func TestServer_JoinWAN_viaMeshGateway(t *testing.T) { ID: "mesh-gateway", Service: "mesh-gateway", Meta: map[string]string{structs.MetaWANFederationKey: "1"}, - Port: gwPort[0], + Port: port, }, } @@ -1635,34 +1701,3 @@ func TestServer_RPC_RateLimit(t *testing.T) { } }) } - -func TestServer_CALogging(t *testing.T) { - if testing.Short() { - t.Skip("too slow for 
testing.Short") - } - - t.Parallel() - _, conf1 := testServerConfig(t) - - // Setup dummy logger to catch output - var buf bytes.Buffer - logger := testutil.LoggerWithOutput(t, &buf) - - deps := newDefaultDeps(t, conf1) - deps.Logger = logger - - s1, err := NewServer(conf1, deps) - require.NoError(t, err) - defer s1.Shutdown() - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Wait til CA root is setup - retry.Run(t, func(r *retry.R) { - var out structs.IndexedCARoots - r.Check(s1.RPC("ConnectCA.Roots", structs.DCSpecificRequest{ - Datacenter: conf1.Datacenter, - }, &out)) - }) - - require.Contains(t, buf.String(), "consul CA provider configured") -} diff --git a/agent/consul/session_endpoint.go b/agent/consul/session_endpoint.go index e15b05227..ae39a6fc5 100644 --- a/agent/consul/session_endpoint.go +++ b/agent/consul/session_endpoint.go @@ -151,7 +151,7 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error { if args.Op == structs.SessionCreate && args.Session.TTL != "" { // If we created a session with a TTL, reset the expiration timer - s.srv.resetSessionTimer(args.Session.ID, &args.Session) + s.srv.resetSessionTimer(&args.Session) } else if args.Op == structs.SessionDestroy { // If we destroyed a session, it might potentially have a TTL, // and we need to clear the timer @@ -308,7 +308,7 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest, // Reset the session TTL timer. 
reply.Sessions = structs.Sessions{session} - if err := s.srv.resetSessionTimer(args.SessionID, session); err != nil { + if err := s.srv.resetSessionTimer(session); err != nil { s.logger.Error("Session renew failed", "error", err) return err } diff --git a/agent/consul/session_endpoint_test.go b/agent/consul/session_endpoint_test.go index 61551b7e8..500bd56e3 100644 --- a/agent/consul/session_endpoint_test.go +++ b/agent/consul/session_endpoint_test.go @@ -6,6 +6,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" @@ -156,7 +157,7 @@ func TestSession_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -377,10 +378,11 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) { } t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -391,12 +393,17 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) - rules := ` -session "foo" { - policy = "read" -} -` - token := createToken(t, codec, rules) + deniedToken := createTokenWithPolicyName(t, codec, "denied", ` + session "foo" { + policy = "deny" + } + `, "root") + + allowedToken := createTokenWithPolicyName(t, codec, "allowed", ` + session "foo" { + policy = "read" + } + `, "root") // Create a node and a session. 
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}) @@ -409,95 +416,94 @@ session "foo" { WriteRequest: structs.WriteRequest{Token: "root"}, } var out string - if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } + err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out) + require.NoError(t, err) - // Perform all the read operations, and make sure everything is empty. - getR := structs.SessionSpecificRequest{ - Datacenter: "dc1", - SessionID: out, - } - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil { - t.Fatalf("err: %v", err) + t.Run("Get", func(t *testing.T) { + require := require.New(t) + + req := &structs.SessionSpecificRequest{ + Datacenter: "dc1", + SessionID: out, } - if len(sessions.Sessions) != 0 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } - listR := structs.DCSpecificRequest{ - Datacenter: "dc1", - } - { + req.Token = deniedToken + + // ACL-restricted results filtered out. var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 0 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } - nodeR := structs.NodeSpecificRequest{ - Datacenter: "dc1", - Node: "foo", - } - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 0 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } + err := msgpackrpc.CallWithCodec(codec, "Session.Get", req, &sessions) + require.NoError(err) + require.Empty(sessions.Sessions) + require.True(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") - // Finally, supply the token and make sure the reads are allowed. 
- getR.Token = token - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 1 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } - listR.Token = token - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 1 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } - nodeR.Token = token - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 1 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } + // ACL-restricted results included. + req.Token = allowedToken - // Try to get a session that doesn't exist to make sure that's handled - // correctly by the filter (it will get passed a nil slice). - getR.SessionID = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" - { + err = msgpackrpc.CallWithCodec(codec, "Session.Get", req, &sessions) + require.NoError(err) + require.Len(sessions.Sessions, 1) + require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + + // Try to get a session that doesn't exist to make sure that's handled + // correctly by the filter (it will get passed a nil slice). + req.SessionID = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" + + err = msgpackrpc.CallWithCodec(codec, "Session.Get", req, &sessions) + require.NoError(err) + require.Empty(sessions.Sessions) + require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("List", func(t *testing.T) { + require := require.New(t) + + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + req.Token = deniedToken + + // ACL-restricted results filtered out. 
var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil { - t.Fatalf("err: %v", err) + + err := msgpackrpc.CallWithCodec(codec, "Session.List", req, &sessions) + require.NoError(err) + require.Empty(sessions.Sessions) + require.True(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + + // ACL-restricted results included. + req.Token = allowedToken + + err = msgpackrpc.CallWithCodec(codec, "Session.List", req, &sessions) + require.NoError(err) + require.Len(sessions.Sessions, 1) + require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("NodeSessions", func(t *testing.T) { + require := require.New(t) + + req := &structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: "foo", } - if len(sessions.Sessions) != 0 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } + req.Token = deniedToken + + // ACL-restricted results filtered out. + var sessions structs.IndexedSessions + + err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", req, &sessions) + require.NoError(err) + require.Empty(sessions.Sessions) + require.True(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + + // ACL-restricted results included. 
+ req.Token = allowedToken + + err = msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", req, &sessions) + require.NoError(err) + require.Len(sessions.Sessions, 1) + require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) } func TestSession_ApplyTimers(t *testing.T) { @@ -725,7 +731,7 @@ func TestSession_Renew_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/session_ttl.go b/agent/consul/session_ttl.go index 426179d96..0bb1cb3f1 100644 --- a/agent/consul/session_ttl.go +++ b/agent/consul/session_ttl.go @@ -47,13 +47,12 @@ func (s *Server) initializeSessionTimers() error { // Scan all sessions and reset their timer state := s.fsm.State() - // TODO(partitions): track all session timers in all partitions - _, sessions, err := state.SessionList(nil, structs.WildcardEnterpriseMetaInDefaultPartition()) + _, sessions, err := state.SessionListAll(nil) if err != nil { return err } for _, session := range sessions { - if err := s.resetSessionTimer(session.ID, session); err != nil { + if err := s.resetSessionTimer(session); err != nil { return err } } @@ -63,20 +62,7 @@ func (s *Server) initializeSessionTimers() error { // resetSessionTimer is used to renew the TTL of a session. // This can be used for new sessions and existing ones. A session // will be faulted in if not given. 
-func (s *Server) resetSessionTimer(id string, session *structs.Session) error { - // Fault the session in if not given - if session == nil { - state := s.fsm.State() - _, s, err := state.SessionGet(nil, id, nil) - if err != nil { - return err - } - if s == nil { - return fmt.Errorf("Session '%s' not found", id) - } - session = s - } - +func (s *Server) resetSessionTimer(session *structs.Session) error { // Bail if the session has no TTL, fast-path some common inputs switch session.TTL { case "", "0", "0s", "0m", "0h": diff --git a/agent/consul/session_ttl_test.go b/agent/consul/session_ttl_test.go index 160a5b69e..5fc4b09f3 100644 --- a/agent/consul/session_ttl_test.go +++ b/agent/consul/session_ttl_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/net-rpc-msgpackrpc" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" ) func generateUUID() (ret string) { @@ -59,50 +59,6 @@ func TestInitializeSessionTimers(t *testing.T) { } } -func TestResetSessionTimer_Fault(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Should not exist - err := s1.resetSessionTimer(generateUUID(), nil) - if err == nil || !strings.Contains(err.Error(), "not found") { - t.Fatalf("err: %v", err) - } - - // Create a session - state := s1.fsm.State() - if err := state.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil { - t.Fatalf("err: %s", err) - } - session := &structs.Session{ - ID: generateUUID(), - Node: "foo", - TTL: "10s", - } - if err := state.SessionCreate(100, session); err != nil { - t.Fatalf("err: %v", err) - } - - // Reset the session timer - err = s1.resetSessionTimer(session.ID, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Check that we 
have a timer - if s1.sessionTimers.Get(session.ID) == nil { - t.Fatalf("missing session timer") - } -} - func TestResetSessionTimer_NoTTL(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -130,7 +86,7 @@ func TestResetSessionTimer_NoTTL(t *testing.T) { } // Reset the session timer - err := s1.resetSessionTimer(session.ID, session) + err := s1.resetSessionTimer(session) if err != nil { t.Fatalf("err: %v", err) } @@ -155,7 +111,7 @@ func TestResetSessionTimer_InvalidTTL(t *testing.T) { } // Reset the session timer - err := s1.resetSessionTimer(session.ID, session) + err := s1.resetSessionTimer(session) if err == nil || !strings.Contains(err.Error(), "Invalid Session TTL") { t.Fatalf("err: %v", err) } diff --git a/agent/consul/snapshot_endpoint.go b/agent/consul/snapshot_endpoint.go index 831cec24f..66b394989 100644 --- a/agent/consul/snapshot_endpoint.go +++ b/agent/consul/snapshot_endpoint.go @@ -77,7 +77,7 @@ func (s *Server) dispatchSnapshotRequest(args *structs.SnapshotRequest, in io.Re // Set the metadata here before we do anything; this should always be // pessimistic if we get more data while the snapshot is being taken. - s.setQueryMeta(&reply.QueryMeta) + s.setQueryMeta(&reply.QueryMeta, args.Token) // Take the snapshot and capture the index. 
snap, err := snapshot.New(s.logger, s.raft) diff --git a/agent/consul/snapshot_endpoint_test.go b/agent/consul/snapshot_endpoint_test.go index 44f0dda43..be0298332 100644 --- a/agent/consul/snapshot_endpoint_test.go +++ b/agent/consul/snapshot_endpoint_test.go @@ -271,7 +271,7 @@ func TestSnapshot_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/state/acl_oss.go b/agent/consul/state/acl_oss.go index 25483fa3e..d7bed1d80 100644 --- a/agent/consul/state/acl_oss.go +++ b/agent/consul/state/acl_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/acl_oss_test.go b/agent/consul/state/acl_oss_test.go index 4ca8fce51..22b3c3b94 100644 --- a/agent/consul/state/acl_oss_test.go +++ b/agent/consul/state/acl_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index f60579a63..31bef38e3 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -3,6 +3,7 @@ package state import ( "errors" "fmt" + "net" "reflect" "strings" @@ -27,6 +28,14 @@ const ( minUUIDLookupLen = 2 ) +var ( + // startingVirtualIP is the start of the virtual IP range we assign to services. + // The effective CIDR range is startingVirtualIP to (startingVirtualIP + virtualIPMaxOffset). + startingVirtualIP = net.IP{240, 0, 0, 0} + + virtualIPMaxOffset = net.IP{15, 255, 255, 254} +) + func resizeNodeLookupKey(s string) string { l := len(s) @@ -72,6 +81,24 @@ func (s *Snapshot) Checks(node string, entMeta *structs.EnterpriseMeta) (memdb.R }) } +// ServiceVirtualIPs is used to pull the service virtual IP mappings for use during snapshots. 
+func (s *Snapshot) ServiceVirtualIPs() (memdb.ResultIterator, error) { + iter, err := s.tx.Get(tableServiceVirtualIPs, indexID) + if err != nil { + return nil, err + } + return iter, nil +} + +// FreeVirtualIPs is used to pull the freed virtual IPs for use during snapshots. +func (s *Snapshot) FreeVirtualIPs() (memdb.ResultIterator, error) { + iter, err := s.tx.Get(tableFreeVirtualIPs, indexID) + if err != nil { + return nil, err + } + return iter, nil +} + // Registration is used to make sure a node, service, and check registration is // performed within a single transaction to avoid race conditions on state // updates. @@ -79,6 +106,14 @@ func (s *Restore) Registration(idx uint64, req *structs.RegisterRequest) error { return s.store.ensureRegistrationTxn(s.tx, idx, true, req, true) } +func (s *Restore) ServiceVirtualIP(req ServiceVirtualIP) error { + return s.tx.Insert(tableServiceVirtualIPs, req) +} + +func (s *Restore) FreeVirtualIP(req FreeVirtualIP) error { + return s.tx.Insert(tableFreeVirtualIPs, req) +} + // EnsureRegistration is used to make sure a node, service, and check // registration is performed within a single transaction to avoid race // conditions on state updates. @@ -631,7 +666,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta } // Invalidate any sessions for this node. 
- toDelete, err := allNodeSessionsTxn(tx, nodeName) + toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault()) if err != nil { return err } @@ -706,11 +741,38 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool if err = checkGatewayWildcardsAndUpdate(tx, idx, svc); err != nil { return fmt.Errorf("failed updating gateway mapping: %s", err) } + if err := upsertKindServiceName(tx, idx, svc.Kind, svc.CompoundServiceName()); err != nil { + return fmt.Errorf("failed to persist service name: %v", err) + } + // Update upstream/downstream mappings if it's a connect service - if svc.Kind == structs.ServiceKindConnectProxy { + if svc.Kind == structs.ServiceKindConnectProxy || svc.Connect.Native { if err = updateMeshTopology(tx, idx, node, svc, existing); err != nil { return fmt.Errorf("failed updating upstream/downstream association") } + + supported, err := virtualIPsSupported(tx, nil) + if err != nil { + return err + } + + // Update the virtual IP for the service + if supported { + service := svc.Service + if svc.Kind == structs.ServiceKindConnectProxy { + service = svc.Proxy.DestinationServiceName + } + + sn := structs.ServiceName{Name: service, EnterpriseMeta: svc.EnterpriseMeta} + vip, err := assignServiceVirtualIP(tx, sn) + if err != nil { + return fmt.Errorf("failed updating virtual IP: %s", err) + } + if svc.TaggedAddresses == nil { + svc.TaggedAddresses = make(map[string]structs.ServiceAddress) + } + svc.TaggedAddresses[structs.TaggedAddressVirtualIP] = structs.ServiceAddress{Address: vip, Port: svc.Port} + } } // Create the service node entry and populate the indexes. Note that @@ -751,6 +813,120 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool return catalogInsertService(tx, entry) } +// assignServiceVirtualIP assigns a virtual IP to the target service and updates +// the global virtual IP counter if necessary. 
+func assignServiceVirtualIP(tx WriteTxn, sn structs.ServiceName) (string, error) { + serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn) + if err != nil { + return "", fmt.Errorf("failed service virtual IP lookup: %s", err) + } + + // Service already has a virtual IP assigned, nothing to do. + if serviceVIP != nil { + sVIP := serviceVIP.(ServiceVirtualIP).IP + result, err := addIPOffset(startingVirtualIP, sVIP) + if err != nil { + return "", err + } + + return result.String(), nil + } + + // Get the next available virtual IP, drawing from any freed from deleted services + // first and then falling back to the global counter if none are available. + latestVIP, err := tx.First(tableFreeVirtualIPs, indexCounterOnly, false) + if err != nil { + return "", fmt.Errorf("failed virtual IP index lookup: %s", err) + } + if latestVIP == nil { + latestVIP, err = tx.First(tableFreeVirtualIPs, indexCounterOnly, true) + if err != nil { + return "", fmt.Errorf("failed virtual IP index lookup: %s", err) + } + } + if latestVIP != nil { + if err := tx.Delete(tableFreeVirtualIPs, latestVIP); err != nil { + return "", fmt.Errorf("failed updating freed virtual IP table: %v", err) + } + } + + var latest FreeVirtualIP + if latestVIP == nil { + latest = FreeVirtualIP{ + IP: net.IPv4zero, + IsCounter: true, + } + } else { + latest = latestVIP.(FreeVirtualIP) + } + + // Store the next virtual IP from the counter if there aren't any freed IPs to draw from. + // Then increment to store the next free virtual IP. + newEntry := FreeVirtualIP{ + IP: latest.IP, + IsCounter: latest.IsCounter, + } + if latest.IsCounter { + newEntry.IP = make(net.IP, len(latest.IP)) + copy(newEntry.IP, latest.IP) + for i := len(newEntry.IP) - 1; i >= 0; i-- { + newEntry.IP[i]++ + if newEntry.IP[i] != 0 { + break + } + } + + // Out of virtual IPs, fail registration. 
+ if newEntry.IP.Equal(virtualIPMaxOffset) { + return "", fmt.Errorf("cannot allocate any more unique service virtual IPs") + } + + if err := tx.Insert(tableFreeVirtualIPs, newEntry); err != nil { + return "", fmt.Errorf("failed updating freed virtual IP table: %v", err) + } + } + + assignedVIP := ServiceVirtualIP{ + Service: sn, + IP: newEntry.IP, + } + if err := tx.Insert(tableServiceVirtualIPs, assignedVIP); err != nil { + return "", fmt.Errorf("failed inserting service virtual IP entry: %s", err) + } + + result, err := addIPOffset(startingVirtualIP, assignedVIP.IP) + if err != nil { + return "", err + } + return result.String(), nil +} + +func addIPOffset(a, b net.IP) (net.IP, error) { + a4 := a.To4() + b4 := b.To4() + if a4 == nil || b4 == nil { + return nil, errors.New("ip is not ipv4") + } + + var raw uint64 + for i := 0; i < 4; i++ { + raw = raw<<8 + uint64(a4[i]) + uint64(b4[i]) + } + return net.IPv4(byte(raw>>24), byte(raw>>16), byte(raw>>8), byte(raw)), nil +} + +func virtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, error) { + _, entry, err := systemMetadataGetTxn(tx, ws, structs.SystemMetadataVirtualIPsEnabled) + if err != nil { + return false, fmt.Errorf("failed system metadata lookup: %s", err) + } + if entry == nil { + return false, nil + } + + return entry.Value != "", nil +} + // Services returns all services along with a list of associated tags. 
func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Services, error) { tx := s.db.Txn(false) @@ -792,16 +968,14 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui return idx, results, nil } -func (s *Store) ServiceList(ws memdb.WatchSet, - include func(svc *structs.ServiceNode) bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { tx := s.db.Txn(false) defer tx.Abort() - return serviceListTxn(tx, ws, include, entMeta) + return serviceListTxn(tx, ws, entMeta) } -func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, - include func(svc *structs.ServiceNode) bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { idx := catalogServicesMaxIndex(tx, entMeta) services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) @@ -813,11 +987,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, unique := make(map[structs.ServiceName]struct{}) for service := services.Next(); service != nil; service = services.Next() { svc := service.(*structs.ServiceNode) - // TODO (freddy) This is a hack to exclude certain kinds. 
- // Need a new index to query by kind and namespace, have to coordinate with consul foundations first - if include == nil || include(svc) { - unique[svc.CompoundServiceName()] = struct{}{} - } + unique[svc.CompoundServiceName()] = struct{}{} } results := make(structs.ServiceList, 0, len(unique)) @@ -1515,6 +1685,12 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st if err := cleanupGatewayWildcards(tx, idx, svc); err != nil { return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err) } + if err := freeServiceVirtualIP(tx, svc.ServiceName, entMeta); err != nil { + return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err) + } + if err := cleanupKindServiceName(tx, idx, svc.CompoundServiceName(), svc.ServiceKind); err != nil { + return fmt.Errorf("failed to persist service name: %v", err) + } } } else { return fmt.Errorf("Could not find any service %s: %s", svc.ServiceName, err) @@ -1523,6 +1699,40 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st return nil } +// freeServiceVirtualIP is used to free a virtual IP for a service after the last instance +// is removed. +func freeServiceVirtualIP(tx WriteTxn, svc string, entMeta *structs.EnterpriseMeta) error { + supported, err := virtualIPsSupported(tx, nil) + if err != nil { + return err + } + if !supported { + return nil + } + + sn := structs.NewServiceName(svc, entMeta) + serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn) + if err != nil { + return fmt.Errorf("failed service virtual IP lookup: %s", err) + } + // Service has no virtual IP assigned, nothing to do. + if serviceVIP == nil { + return nil + } + + // Delete the service virtual IP and add it to the freed IPs list. 
+ if err := tx.Delete(tableServiceVirtualIPs, serviceVIP); err != nil { + return fmt.Errorf("failed updating freed virtual IP table: %v", err) + } + + newEntry := FreeVirtualIP{IP: serviceVIP.(ServiceVirtualIP).IP} + if err := tx.Insert(tableFreeVirtualIPs, newEntry); err != nil { + return fmt.Errorf("failed updating freed virtual IP table: %v", err) + } + + return nil +} + // EnsureCheck is used to store a check registration in the db. func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error { tx := s.db.WriteTxn(idx) @@ -2297,6 +2507,49 @@ func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *stru return lib.MaxUint64(maxIdx, idx), results, nil } +func (s *Store) VirtualIPForService(sn structs.ServiceName) (string, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + vip, err := tx.First(tableServiceVirtualIPs, indexID, sn) + if err != nil { + return "", fmt.Errorf("failed service virtual IP lookup: %s", err) + } + if vip == nil { + return "", nil + } + + result, err := addIPOffset(startingVirtualIP, vip.(ServiceVirtualIP).IP) + if err != nil { + return "", err + } + return result.String(), nil +} + +func (s *Store) ServiceNamesOfKind(ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + return serviceNamesOfKindTxn(tx, ws, kind) +} + +func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { + var names []*KindServiceName + iter, err := tx.Get(tableKindServiceNames, indexKindOnly, kind) + if err != nil { + return 0, nil, err + } + ws.Add(iter.WatchCh()) + + idx := kindServiceNamesMaxIndex(tx, ws, kind) + for name := iter.Next(); name != nil; name = iter.Next() { + ksn := name.(*KindServiceName) + names = append(names, ksn) + } + + return idx, names, nil +} + // parseCheckServiceNodes is used to parse through a given set of services, // and query for an associated node and a set of 
checks. This is the inner // method used to return a rich set of results from a more simple query. @@ -2562,7 +2815,8 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, // checkSessionsTxn returns the IDs of all sessions associated with a health check func checkSessionsTxn(tx ReadTxn, hc *structs.HealthCheck) ([]*sessionCheck, error) { - mappings, err := getCompoundWithTxn(tx, "session_checks", "node_check", &hc.EnterpriseMeta, hc.Node, string(hc.CheckID)) + mappings, err := tx.Get(tableSessionChecks, indexNodeCheck, MultiQuery{Value: []string{hc.Node, string(hc.CheckID)}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(hc.PartitionOrDefault())}) if err != nil { return nil, fmt.Errorf("failed session checks lookup: %s", err) } @@ -3632,3 +3886,44 @@ func truncateGatewayServiceTopologyMappings(tx WriteTxn, idx uint64, gateway str return nil } + +func upsertKindServiceName(tx WriteTxn, idx uint64, kind structs.ServiceKind, name structs.ServiceName) error { + q := KindServiceNameQuery{Name: name.Name, Kind: kind, EnterpriseMeta: name.EnterpriseMeta} + existing, err := tx.First(tableKindServiceNames, indexID, q) + if err != nil { + return err + } + + // Service name is already known. Nothing to do. 
+ if existing != nil { + return nil + } + + ksn := KindServiceName{ + Kind: kind, + Service: name, + RaftIndex: structs.RaftIndex{ + CreateIndex: idx, + ModifyIndex: idx, + }, + } + if err := tx.Insert(tableKindServiceNames, &ksn); err != nil { + return fmt.Errorf("failed inserting %s/%s into %s: %s", kind, name.String(), tableKindServiceNames, err) + } + if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { + return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) + } + return nil +} + +func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, kind structs.ServiceKind) error { + q := KindServiceNameQuery{Name: name.Name, Kind: kind, EnterpriseMeta: name.EnterpriseMeta} + if _, err := tx.DeleteAll(tableKindServiceNames, indexID, q); err != nil { + return fmt.Errorf("failed to delete %s from %s: %s", name, tableKindServiceNames, err) + } + + if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { + return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) + } + return nil +} diff --git a/agent/consul/state/catalog_events_oss.go b/agent/consul/state/catalog_events_oss.go index cf5231dc9..9144c62c7 100644 --- a/agent/consul/state/catalog_events_oss.go +++ b/agent/consul/state/catalog_events_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index 277dec11c..6e86171d2 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -73,6 +73,11 @@ func TestServiceHealthSnapshot(t *testing.T) { func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) { store := NewStateStore(nil) + require.NoError(t, store.SystemMetadataSet(0, &structs.SystemMetadataEntry{ + Key: structs.SystemMetadataVirtualIPsEnabled, + Value: "true", + })) + counter := newIndexCounter() err := 
store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "db")) require.NoError(t, err) @@ -1574,6 +1579,10 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { func (tc eventsTestCase) run(t *testing.T) { s := NewStateStore(nil) + require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{ + Key: structs.SystemMetadataVirtualIPsEnabled, + Value: "true", + })) setupIndex := uint64(10) mutateIndex := uint64(100) @@ -1936,7 +1945,14 @@ func evServiceUnchanged(e *stream.Event) error { // evConnectNative option converts the base event to represent a connect-native // service instance. func evConnectNative(e *stream.Event) error { - getPayloadCheckServiceNode(e.Payload).Service.Connect.Native = true + csn := getPayloadCheckServiceNode(e.Payload) + csn.Service.Connect.Native = true + csn.Service.TaggedAddresses = map[string]structs.ServiceAddress{ + structs.TaggedAddressVirtualIP: { + Address: "240.0.0.1", + Port: csn.Service.Port, + }, + } return nil } @@ -1969,6 +1985,13 @@ func evSidecar(e *stream.Event) error { csn.Service.Proxy.DestinationServiceName = svc csn.Service.Proxy.DestinationServiceID = svc + csn.Service.TaggedAddresses = map[string]structs.ServiceAddress{ + structs.TaggedAddressVirtualIP: { + Address: "240.0.0.1", + Port: csn.Service.Port, + }, + } + // Convert the check to point to the right ID now. This isn't totally // realistic - sidecars should have alias checks etc but this is good enough // to test this code path. @@ -1990,7 +2013,12 @@ func evSidecar(e *stream.Event) error { // amount to simulate a service change. Can be used with evSidecar since it's a // relative change (+10). 
func evMutatePort(e *stream.Event) error { - getPayloadCheckServiceNode(e.Payload).Service.Port += 10 + csn := getPayloadCheckServiceNode(e.Payload) + csn.Service.Port += 10 + if addr, ok := csn.Service.TaggedAddresses[structs.TaggedAddressVirtualIP]; ok { + addr.Port = csn.Service.Port + csn.Service.TaggedAddresses[structs.TaggedAddressVirtualIP] = addr + } return nil } @@ -2067,6 +2095,10 @@ func evRenameService(e *stream.Event) error { // We don't need to update our own details, only the name of the destination csn.Service.Proxy.DestinationServiceName += "_changed" + taggedAddr := csn.Service.TaggedAddresses[structs.TaggedAddressVirtualIP] + taggedAddr.Address = "240.0.0.2" + csn.Service.TaggedAddresses[structs.TaggedAddressVirtualIP] = taggedAddr + if e.Topic == topicServiceHealthConnect { payload := e.Payload.(EventPayloadCheckServiceNode) payload.overrideKey = csn.Service.Proxy.DestinationServiceName diff --git a/agent/consul/state/catalog_oss.go b/agent/consul/state/catalog_oss.go index e6700cc1a..f2902ca71 100644 --- a/agent/consul/state/catalog_oss.go +++ b/agent/consul/state/catalog_oss.go @@ -1,9 +1,11 @@ +//go:build !consulent // +build !consulent package state import ( "fmt" + "strings" memdb "github.com/hashicorp/go-memdb" @@ -17,13 +19,7 @@ func serviceIndexName(name string, _ *structs.EnterpriseMeta) string { } func serviceKindIndexName(kind structs.ServiceKind, _ *structs.EnterpriseMeta) string { - switch kind { - case structs.ServiceKindTypical: - // needs a special case here - return "service_kind.typical" - default: - return "service_kind." + string(kind) - } + return "service_kind." 
+ kind.Normalized() } func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *structs.EnterpriseMeta) error { @@ -191,3 +187,22 @@ func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) ( func (s *Store) ValidateRegisterRequest(_ *structs.RegisterRequest) (*structs.EnterpriseMeta, error) { return nil, nil } + +func indexFromKindServiceName(arg interface{}) ([]byte, error) { + var b indexBuilder + + switch n := arg.(type) { + case KindServiceNameQuery: + b.String(strings.ToLower(string(n.Kind))) + b.String(strings.ToLower(n.Name)) + return b.Bytes(), nil + + case *KindServiceName: + b.String(strings.ToLower(string(n.Kind))) + b.String(strings.ToLower(n.Service.Name)) + return b.Bytes(), nil + + default: + return nil, fmt.Errorf("type must be KindServiceNameQuery or *KindServiceName: %T", arg) + } +} diff --git a/agent/consul/state/catalog_oss_test.go b/agent/consul/state/catalog_oss_test.go index 5c81e671e..5811416b1 100644 --- a/agent/consul/state/catalog_oss_test.go +++ b/agent/consul/state/catalog_oss_test.go @@ -1,8 +1,11 @@ +//go:build !consulent // +build !consulent package state import ( + "net" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/types" ) @@ -385,3 +388,64 @@ func testIndexerTableServices() map[string]indexerTestCase { }, } } + +func testIndexerTableServiceVirtualIPs() map[string]indexerTestCase { + obj := ServiceVirtualIP{ + Service: structs.ServiceName{ + Name: "foo", + }, + IP: net.ParseIP("127.0.0.1"), + } + + return map[string]indexerTestCase{ + indexID: { + read: indexValue{ + source: structs.ServiceName{ + Name: "foo", + }, + expected: []byte("foo\x00"), + }, + write: indexValue{ + source: obj, + expected: []byte("foo\x00"), + }, + }, + } +} + +func testIndexerTableKindServiceNames() map[string]indexerTestCase { + obj := &KindServiceName{ + Service: structs.ServiceName{ + Name: "web-sidecar-proxy", + }, + Kind: structs.ServiceKindConnectProxy, + } + + return 
map[string]indexerTestCase{ + indexID: { + read: indexValue{ + source: &KindServiceName{ + Service: structs.ServiceName{ + Name: "web-sidecar-proxy", + }, + Kind: structs.ServiceKindConnectProxy, + }, + expected: []byte("connect-proxy\x00web-sidecar-proxy\x00"), + }, + write: indexValue{ + source: obj, + expected: []byte("connect-proxy\x00web-sidecar-proxy\x00"), + }, + }, + indexKind: { + read: indexValue{ + source: structs.ServiceKindConnectProxy, + expected: []byte("connect-proxy\x00"), + }, + write: indexValue{ + source: obj, + expected: []byte("connect-proxy\x00"), + }, + }, + } +} diff --git a/agent/consul/state/catalog_schema.go b/agent/consul/state/catalog_schema.go index 808c89834..c03f649be 100644 --- a/agent/consul/state/catalog_schema.go +++ b/agent/consul/state/catalog_schema.go @@ -2,6 +2,7 @@ package state import ( "fmt" + "net" "reflect" "strings" @@ -11,11 +12,14 @@ import ( ) const ( - tableNodes = "nodes" - tableServices = "services" - tableChecks = "checks" - tableGatewayServices = "gateway-services" - tableMeshTopology = "mesh-topology" + tableNodes = "nodes" + tableServices = "services" + tableChecks = "checks" + tableGatewayServices = "gateway-services" + tableMeshTopology = "mesh-topology" + tableServiceVirtualIPs = "service-virtual-ips" + tableFreeVirtualIPs = "free-virtual-ips" + tableKindServiceNames = "kind-service-names" indexID = "id" indexService = "service" @@ -30,6 +34,7 @@ const ( indexGateway = "gateway" indexUUID = "uuid" indexMeta = "meta" + indexCounterOnly = "counter" ) // nodesTableSchema returns a new table schema used for storing struct.Node. @@ -598,3 +603,139 @@ func (q NodeCheckQuery) NamespaceOrDefault() string { func (q NodeCheckQuery) PartitionOrDefault() string { return q.EnterpriseMeta.PartitionOrDefault() } + +// ServiceVirtualIP is used to store a virtual IP associated with a service. +// It is also used to store assigned virtual IPs when a snapshot is created. 
+type ServiceVirtualIP struct { + Service structs.ServiceName + IP net.IP +} + +// FreeVirtualIP is used to store a virtual IP freed up by a service deregistration. +// It is also used to store free virtual IPs when a snapshot is created. +type FreeVirtualIP struct { + IP net.IP + IsCounter bool +} + +func serviceVirtualIPTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tableServiceVirtualIPs, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: &ServiceNameIndex{ + Field: "Service", + }, + }, + }, + } +} + +func freeVirtualIPTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tableFreeVirtualIPs, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "IP", + }, + }, + indexCounterOnly: { + Name: indexCounterOnly, + AllowMissing: false, + Unique: false, + Indexer: &memdb.ConditionalIndex{ + Conditional: func(obj interface{}) (bool, error) { + if vip, ok := obj.(FreeVirtualIP); ok { + return vip.IsCounter, nil + } + return false, fmt.Errorf("object is not a virtual IP entry") + }, + }, + }, + }, + } +} + +type KindServiceName struct { + Kind structs.ServiceKind + Service structs.ServiceName + + structs.RaftIndex +} + +func kindServiceNameTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tableKindServiceNames, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: indexerSingle{ + readIndex: indexFromKindServiceName, + writeIndex: indexFromKindServiceName, + }, + }, + indexKindOnly: { + Name: indexKindOnly, + AllowMissing: false, + Unique: false, + Indexer: indexerSingle{ + readIndex: indexFromKindServiceNameKindOnly, + writeIndex: indexFromKindServiceNameKindOnly, + }, + }, + }, + } +} + +// KindServiceNameQuery is used to lookup service names by kind or enterprise 
meta. +type KindServiceNameQuery struct { + Kind structs.ServiceKind + Name string + structs.EnterpriseMeta +} + +// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (q KindServiceNameQuery) NamespaceOrDefault() string { + return q.EnterpriseMeta.NamespaceOrDefault() +} + +// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (q KindServiceNameQuery) PartitionOrDefault() string { + return q.EnterpriseMeta.PartitionOrDefault() +} + +func indexFromKindServiceNameKindOnly(raw interface{}) ([]byte, error) { + switch x := raw.(type) { + case *KindServiceName: + var b indexBuilder + b.String(strings.ToLower(string(x.Kind))) + return b.Bytes(), nil + + case structs.ServiceKind: + var b indexBuilder + b.String(strings.ToLower(string(x))) + return b.Bytes(), nil + + default: + return nil, fmt.Errorf("type must be *KindServiceName or structs.ServiceKind: %T", raw) + } +} + +func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) uint64 { + return maxIndexWatchTxn(tx, ws, kindServiceNameIndexName(kind)) +} + +func kindServiceNameIndexName(kind structs.ServiceKind) string { + return "kind_service_names." + kind.Normalized() +} diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index 75d37c5f7..bfca9a2d9 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -1548,6 +1548,142 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) { assert.Equal(&expect1, out.Services["connect-proxy"]) } +func TestStateStore_EnsureService_virtualIps(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{ + Key: structs.SystemMetadataVirtualIPsEnabled, + Value: "true", + })) + + // Create the service registration. 
+ entMeta := structs.DefaultEnterpriseMetaInDefaultPartition() + ns1 := &structs.NodeService{ + ID: "foo", + Service: "foo", + Address: "1.1.1.1", + Port: 1111, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + Connect: structs.ServiceConnect{Native: true}, + EnterpriseMeta: *entMeta, + } + + // Service successfully registers into the state store. + testRegisterNode(t, s, 0, "node1") + require.NoError(t, s.EnsureService(10, "node1", ns1)) + + // Make sure there's a virtual IP for the foo service. + vip, err := s.VirtualIPForService(structs.ServiceName{Name: "foo"}) + require.NoError(t, err) + assert.Equal("240.0.0.1", vip) + + // Retrieve and verify + _, out, err := s.NodeServices(nil, "node1", nil) + require.NoError(t, err) + assert.NotNil(out) + assert.Len(out.Services, 1) + + taggedAddress := out.Services["foo"].TaggedAddresses[structs.TaggedAddressVirtualIP] + assert.Equal(vip, taggedAddress.Address) + assert.Equal(ns1.Port, taggedAddress.Port) + + // Create the service registration. + ns2 := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "redis-proxy", + Service: "redis-proxy", + Address: "2.2.2.2", + Port: 2222, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + Proxy: structs.ConnectProxyConfig{DestinationServiceName: "redis"}, + EnterpriseMeta: *entMeta, + } + require.NoError(t, s.EnsureService(11, "node1", ns2)) + + // Make sure the virtual IP has been incremented for the redis service. 
+ vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) + require.NoError(t, err) + assert.Equal("240.0.0.2", vip) + + // Retrieve and verify + _, out, err = s.NodeServices(nil, "node1", nil) + assert.Nil(err) + assert.NotNil(out) + assert.Len(out.Services, 2) + + taggedAddress = out.Services["redis-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP] + assert.Equal(vip, taggedAddress.Address) + assert.Equal(ns2.Port, taggedAddress.Port) + + // Delete the first service and make sure it no longer has a virtual IP assigned. + require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta)) + vip, err = s.VirtualIPForService(structs.ServiceName{Name: "connect-proxy"}) + require.NoError(t, err) + assert.Equal("", vip) + + // Register another instance of redis-proxy and make sure the virtual IP is unchanged. + ns3 := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "redis-proxy2", + Service: "redis-proxy", + Address: "3.3.3.3", + Port: 3333, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + Proxy: structs.ConnectProxyConfig{DestinationServiceName: "redis"}, + EnterpriseMeta: *entMeta, + } + require.NoError(t, s.EnsureService(13, "node1", ns3)) + + // Make sure the virtual IP is unchanged for the redis service. + vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) + require.NoError(t, err) + assert.Equal("240.0.0.2", vip) + + // Make sure the new instance has the same virtual IP. + _, out, err = s.NodeServices(nil, "node1", nil) + require.NoError(t, err) + taggedAddress = out.Services["redis-proxy2"].TaggedAddresses[structs.TaggedAddressVirtualIP] + assert.Equal(vip, taggedAddress.Address) + assert.Equal(ns3.Port, taggedAddress.Port) + + // Register another service to take its virtual IP. 
+ ns4 := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "web-proxy", + Service: "web-proxy", + Address: "4.4.4.4", + Port: 4444, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + Proxy: structs.ConnectProxyConfig{DestinationServiceName: "web"}, + EnterpriseMeta: *entMeta, + } + require.NoError(t, s.EnsureService(14, "node1", ns4)) + + // Make sure the virtual IP has allocated from the previously freed service. + vip, err = s.VirtualIPForService(structs.ServiceName{Name: "web"}) + require.NoError(t, err) + assert.Equal("240.0.0.1", vip) + + // Retrieve and verify + _, out, err = s.NodeServices(nil, "node1", nil) + require.NoError(t, err) + taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP] + assert.Equal(vip, taggedAddress.Address) + assert.Equal(ns4.Port, taggedAddress.Port) +} + func TestStateStore_Services(t *testing.T) { s := testStateStore(t) @@ -7520,6 +7656,143 @@ func TestProtocolForIngressGateway(t *testing.T) { } } +func TestStateStore_EnsureService_ServiceNames(t *testing.T) { + s := testStateStore(t) + + // Create the service registration. 
+ entMeta := structs.DefaultEnterpriseMetaInDefaultPartition() + + services := []structs.NodeService{ + { + Kind: structs.ServiceKindIngressGateway, + ID: "ingress-gateway", + Service: "ingress-gateway", + Address: "2.2.2.2", + Port: 2222, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindMeshGateway, + ID: "mesh-gateway", + Service: "mesh-gateway", + Address: "4.4.4.4", + Port: 4444, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindConnectProxy, + ID: "connect-proxy", + Service: "connect-proxy", + Address: "1.1.1.1", + Port: 1111, + Proxy: structs.ConnectProxyConfig{DestinationServiceName: "foo"}, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindTerminatingGateway, + ID: "terminating-gateway", + Service: "terminating-gateway", + Address: "3.3.3.3", + Port: 3333, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindTypical, + ID: "web", + Service: "web", + Address: "5.5.5.5", + Port: 5555, + EnterpriseMeta: *entMeta, + }, + } + + var idx uint64 + testRegisterNode(t, s, idx, "node1") + + for _, svc := range services { + idx++ + require.NoError(t, s.EnsureService(idx, "node1", &svc)) + + // Ensure the service name was stored for all of them under the appropriate kind + gotIdx, gotNames, err := s.ServiceNamesOfKind(nil, svc.Kind) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + require.Len(t, gotNames, 1) + require.Equal(t, svc.CompoundServiceName(), gotNames[0].Service) + require.Equal(t, svc.Kind, gotNames[0].Kind) + } + + // Register another ingress gateway and there should be two names under the kind index + newIngress := structs.NodeService{ + Kind: structs.ServiceKindIngressGateway, + ID: "new-ingress-gateway", + Service: "new-ingress-gateway", + Address: "6.6.6.6", + Port: 6666, + EnterpriseMeta: *entMeta, + } + idx++ + require.NoError(t, s.EnsureService(idx, "node1", &newIngress)) + + gotIdx, got, err := s.ServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) + require.NoError(t, err) + 
require.Equal(t, idx, gotIdx) + + expect := []*KindServiceName{ + { + Kind: structs.ServiceKindIngressGateway, + Service: structs.NewServiceName("ingress-gateway", nil), + RaftIndex: structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + Kind: structs.ServiceKindIngressGateway, + Service: structs.NewServiceName("new-ingress-gateway", nil), + RaftIndex: structs.RaftIndex{ + CreateIndex: idx, + ModifyIndex: idx, + }, + }, + } + require.Equal(t, expect, got) + + // Deregister an ingress gateway and the index should not slide back + idx++ + require.NoError(t, s.DeleteService(idx, "node1", "new-ingress-gateway", entMeta)) + + gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + require.Equal(t, expect[:1], got) + + // Registering another instance of a known service should not bump the kind index + newMGW := structs.NodeService{ + Kind: structs.ServiceKindMeshGateway, + ID: "mesh-gateway-1", + Service: "mesh-gateway", + Address: "7.7.7.7", + Port: 7777, + EnterpriseMeta: *entMeta, + } + idx++ + require.NoError(t, s.EnsureService(idx, "node1", &newMGW)) + + gotIdx, _, err = s.ServiceNamesOfKind(nil, structs.ServiceKindMeshGateway) + require.NoError(t, err) + require.Equal(t, uint64(2), gotIdx) + + // Deregister the single typical service and the service name should also be dropped + idx++ + require.NoError(t, s.DeleteService(idx, "node1", "web", entMeta)) + + gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindTypical) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + require.Empty(t, got) +} + func runStep(t *testing.T, name string, fn func(t *testing.T)) { t.Helper() if !t.Run(name, fn) { diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index 14d9c4936..29e4e7aa5 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -395,7 +395,7 @@ func validateProposedConfigEntryInGraph( } 
case structs.ServiceIntentions: case structs.MeshConfig: - case structs.PartitionExports: + case structs.ExportedServices: default: return fmt.Errorf("unhandled kind %q during validation of %q", kindName.Kind, kindName.Name) } @@ -800,7 +800,7 @@ func (s *Store) serviceDiscoveryChainTxn( } // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(config) + signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) if signingID == nil { // If CA is bootstrapped at all then this should never happen but be // defensive. @@ -880,24 +880,21 @@ func readDiscoveryChainConfigEntriesTxn( sid := structs.NewServiceID(serviceName, entMeta) - // Grab the proxy defaults if they exist. - idx, proxy, err := getProxyConfigEntryTxn(tx, ws, structs.ProxyConfigGlobal, overrides, entMeta) - if err != nil { - return 0, nil, err - } else if proxy != nil { - res.GlobalProxy = proxy - } - - // At every step we'll need service defaults. + // At every step we'll need service and proxy defaults. todoDefaults[sid] = struct{}{} + var maxIdx uint64 + // first fetch the router, of which we only collect 1 per chain eval - _, router, err := getRouterConfigEntryTxn(tx, ws, serviceName, overrides, entMeta) + idx, router, err := getRouterConfigEntryTxn(tx, ws, serviceName, overrides, entMeta) if err != nil { return 0, nil, err } else if router != nil { res.Routers[sid] = router } + if idx > maxIdx { + maxIdx = idx + } if router != nil { for _, svc := range router.ListRelatedServices() { @@ -922,10 +919,13 @@ func readDiscoveryChainConfigEntriesTxn( // Yes, even for splitters. 
todoDefaults[splitID] = struct{}{} - _, splitter, err := getSplitterConfigEntryTxn(tx, ws, splitID.ID, overrides, &splitID.EnterpriseMeta) + idx, splitter, err := getSplitterConfigEntryTxn(tx, ws, splitID.ID, overrides, &splitID.EnterpriseMeta) if err != nil { return 0, nil, err } + if idx > maxIdx { + maxIdx = idx + } if splitter == nil { res.Splitters[splitID] = nil @@ -959,10 +959,13 @@ func readDiscoveryChainConfigEntriesTxn( // And resolvers, too. todoDefaults[resolverID] = struct{}{} - _, resolver, err := getResolverConfigEntryTxn(tx, ws, resolverID.ID, overrides, &resolverID.EnterpriseMeta) + idx, resolver, err := getResolverConfigEntryTxn(tx, ws, resolverID.ID, overrides, &resolverID.EnterpriseMeta) if err != nil { return 0, nil, err } + if idx > maxIdx { + maxIdx = idx + } if resolver == nil { res.Resolvers[resolverID] = nil @@ -987,16 +990,31 @@ func readDiscoveryChainConfigEntriesTxn( continue // already fetched } - _, entry, err := getServiceConfigEntryTxn(tx, ws, svcID.ID, overrides, &svcID.EnterpriseMeta) + if _, ok := res.ProxyDefaults[svcID.PartitionOrDefault()]; !ok { + idx, proxy, err := getProxyConfigEntryTxn(tx, ws, structs.ProxyConfigGlobal, overrides, &svcID.EnterpriseMeta) + if err != nil { + return 0, nil, err + } + if idx > maxIdx { + maxIdx = idx + } + if proxy != nil { + res.ProxyDefaults[proxy.PartitionOrDefault()] = proxy + } + } + + idx, entry, err := getServiceConfigEntryTxn(tx, ws, svcID.ID, overrides, &svcID.EnterpriseMeta) if err != nil { return 0, nil, err } + if idx > maxIdx { + maxIdx = idx + } if entry == nil { res.Services[svcID] = nil continue } - res.Services[svcID] = entry } @@ -1022,7 +1040,7 @@ func readDiscoveryChainConfigEntriesTxn( } } - return idx, res, nil + return maxIdx, res, nil } // anyKey returns any key from the provided map if any exist. 
Useful for using diff --git a/agent/consul/state/config_entry_intention_oss.go b/agent/consul/state/config_entry_intention_oss.go index 5caf25b38..d6fafe621 100644 --- a/agent/consul/state/config_entry_intention_oss.go +++ b/agent/consul/state/config_entry_intention_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/config_entry_oss.go b/agent/consul/state/config_entry_oss.go index e7809389d..817162d19 100644 --- a/agent/consul/state/config_entry_oss.go +++ b/agent/consul/state/config_entry_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/config_entry_oss_test.go b/agent/consul/state/config_entry_oss_test.go index b6cb4832c..1a4af3cf9 100644 --- a/agent/consul/state/config_entry_oss_test.go +++ b/agent/consul/state/config_entry_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/config_entry_test.go b/agent/consul/state/config_entry_test.go index b47d9a356..daddc9bb8 100644 --- a/agent/consul/state/config_entry_test.go +++ b/agent/consul/state/config_entry_test.go @@ -1347,6 +1347,13 @@ func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []Config &entry.EnterpriseMeta, )) } + for _, entry := range entrySet.ProxyDefaults { + out = append(out, NewConfigEntryKindName( + entry.Kind, + entry.Name, + &entry.EnterpriseMeta, + )) + } return out } diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index ab54ae69a..0b35d0393 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -180,8 +180,6 @@ func (s *Store) caSetConfigTxn(idx uint64, tx WriteTxn, config *structs.CAConfig if prev != nil { existing := prev.(*structs.CAConfiguration) config.CreateIndex = existing.CreateIndex - // Allow the ClusterID to change if it's provided by an internal operation, such - // as a primary datacenter being switched to 
secondary mode. if config.ClusterID == "" { config.ClusterID = existing.ClusterID } diff --git a/agent/consul/state/coordinate_oss.go b/agent/consul/state/coordinate_oss.go index 9e76f0cf9..d6b6042d7 100644 --- a/agent/consul/state/coordinate_oss.go +++ b/agent/consul/state/coordinate_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/coordinate_oss_test.go b/agent/consul/state/coordinate_oss_test.go index d7a7d805d..0bb08c1df 100644 --- a/agent/consul/state/coordinate_oss_test.go +++ b/agent/consul/state/coordinate_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/delay_oss.go b/agent/consul/state/delay_oss.go index 96aa2d9c3..41b9a0405 100644 --- a/agent/consul/state/delay_oss.go +++ b/agent/consul/state/delay_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/graveyard.go b/agent/consul/state/graveyard.go index 319ae5c95..89601ea21 100644 --- a/agent/consul/state/graveyard.go +++ b/agent/consul/state/graveyard.go @@ -16,6 +16,10 @@ type Tombstone struct { structs.EnterpriseMeta } +func (t Tombstone) IDValue() string { + return t.Key +} + // Graveyard manages a set of tombstones. type Graveyard struct { // GC is when we create tombstones to track their time-to-live. @@ -50,32 +54,9 @@ func (g *Graveyard) InsertTxn(tx WriteTxn, key string, idx uint64, entMeta *stru return nil } -// GetMaxIndexTxn returns the highest index tombstone whose key matches the -// given context, using a prefix match. 
-func (g *Graveyard) GetMaxIndexTxn(tx ReadTxn, prefix string, entMeta *structs.EnterpriseMeta) (uint64, error) { - stones, err := getWithTxn(tx, "tombstones", "id_prefix", prefix, entMeta) - if err != nil { - return 0, fmt.Errorf("failed querying tombstones: %s", err) - } - - var lindex uint64 - for stone := stones.Next(); stone != nil; stone = stones.Next() { - s := stone.(*Tombstone) - if s.Index > lindex { - lindex = s.Index - } - } - return lindex, nil -} - // DumpTxn returns all the tombstones. func (g *Graveyard) DumpTxn(tx ReadTxn) (memdb.ResultIterator, error) { - iter, err := tx.Get("tombstones", "id") - if err != nil { - return nil, err - } - - return iter, nil + return tx.Get(tableTombstones, indexID) } // RestoreTxn is used when restoring from a snapshot. For general inserts, use @@ -94,7 +75,7 @@ func (g *Graveyard) ReapTxn(tx WriteTxn, idx uint64) error { // This does a full table scan since we currently can't index on a // numeric value. Since this is all in-memory and done infrequently // this pretty reasonable. - stones, err := tx.Get("tombstones", "id") + stones, err := tx.Get(tableTombstones, indexID) if err != nil { return fmt.Errorf("failed querying tombstones: %s", err) } diff --git a/agent/consul/state/graveyard_oss.go b/agent/consul/state/graveyard_oss.go index 201f77dc0..71b6bd90b 100644 --- a/agent/consul/state/graveyard_oss.go +++ b/agent/consul/state/graveyard_oss.go @@ -1,9 +1,12 @@ +//go:build !consulent // +build !consulent package state import ( "fmt" + + "github.com/hashicorp/consul/agent/structs" ) func (g *Graveyard) insertTombstoneWithTxn(tx WriteTxn, _ string, stone *Tombstone, updateMax bool) error { @@ -22,3 +25,21 @@ func (g *Graveyard) insertTombstoneWithTxn(tx WriteTxn, _ string, stone *Tombsto } return nil } + +// GetMaxIndexTxn returns the highest index tombstone whose key matches the +// given context, using a prefix match. 
+func (g *Graveyard) GetMaxIndexTxn(tx ReadTxn, prefix string, _ *structs.EnterpriseMeta) (uint64, error) { + var lindex uint64 + q := Query{Value: prefix, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()} + stones, err := tx.Get(tableTombstones, indexID+"_prefix", q) + if err != nil { + return 0, fmt.Errorf("failed querying tombstones: %s", err) + } + for stone := stones.Next(); stone != nil; stone = stones.Next() { + s := stone.(*Tombstone) + if s.Index > lindex { + lindex = s.Index + } + } + return lindex, nil +} diff --git a/agent/consul/state/graveyard_test.go b/agent/consul/state/graveyard_test.go index 09e91e56c..f8452be09 100644 --- a/agent/consul/state/graveyard_test.go +++ b/agent/consul/state/graveyard_test.go @@ -195,7 +195,7 @@ func TestGraveyard_Snapshot_Restore(t *testing.T) { }() // Verify the index was set correctly. - if idx := s.maxIndex("tombstones"); idx != 9 { + if idx := s.maxIndex(partitionedIndexEntryName(tableTombstones, "default")); idx != 9 { t.Fatalf("bad index: %d", idx) } @@ -250,7 +250,7 @@ func TestGraveyard_Snapshot_Restore(t *testing.T) { }() // Verify that the restore works. - if idx := s.maxIndex("tombstones"); idx != 9 { + if idx := s.maxIndex(partitionedIndexEntryName(tableTombstones, "default")); idx != 9 { t.Fatalf("bad index: %d", idx) } diff --git a/agent/consul/state/indexer.go b/agent/consul/state/indexer.go index 5d0c58eb5..7fa30a7d5 100644 --- a/agent/consul/state/indexer.go +++ b/agent/consul/state/indexer.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "errors" "fmt" + "strings" "time" "github.com/hashicorp/consul/agent/structs" @@ -137,6 +138,58 @@ func (b *indexBuilder) Bytes() []byte { return (*bytes.Buffer)(b).Bytes() } +// singleValueID is an interface that may be implemented by any type that should +// be indexed by a single ID and a structs.EnterpriseMeta to scope the ID. 
+type singleValueID interface { + IDValue() string + PartitionOrDefault() string + NamespaceOrDefault() string +} + +type multiValueID interface { + IDValue() []string + PartitionOrDefault() string + NamespaceOrDefault() string +} + +var _ singleValueID = (*structs.DirEntry)(nil) +var _ singleValueID = (*Tombstone)(nil) +var _ singleValueID = (*Query)(nil) +var _ singleValueID = (*structs.Session)(nil) + +// indexFromIDValue creates an index key from any struct that implements singleValueID +func indexFromIDValueLowerCase(raw interface{}) ([]byte, error) { + e, ok := raw.(singleValueID) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement singleValueID", raw) + } + + v := strings.ToLower(e.IDValue()) + if v == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(v) + return b.Bytes(), nil +} + +// indexFromIDValue creates an index key from any struct that implements singleValueID +func indexFromMultiValueID(raw interface{}) ([]byte, error) { + e, ok := raw.(multiValueID) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement multiValueID", raw) + } + var b indexBuilder + for _, v := range e.IDValue() { + if v == "" { + return nil, errMissingValueForIndex + } + b.String(strings.ToLower(v)) + } + return b.Bytes(), nil +} + func (b *indexBuilder) Bool(v bool) { b.Raw([]byte{intFromBool(v)}) } diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 72850b29e..f2f64500f 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -995,36 +995,29 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, maxIdx = index } - // Check for a wildcard intention (* -> *) since it overrides the default decision from ACLs - if len(intentions) > 0 { - // Intentions with wildcard source and destination have the lowest precedence, so they are last in the list - ixn := intentions[len(intentions)-1] - - if ixn.HasWildcardSource() && 
ixn.HasWildcardDestination() { - defaultDecision = acl.Allow - if ixn.Action == structs.IntentionActionDeny { - defaultDecision = acl.Deny - } - } - } - - index, allServices, err := serviceListTxn(tx, ws, func(svc *structs.ServiceNode) bool { - // Only include ingress gateways as downstreams, since they cannot receive service mesh traffic - // TODO(freddy): One remaining issue is that this includes non-Connect services (typical services without a proxy) - // Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy. - // Maybe start tracking services represented by proxies? (both sidecar and ingress) - if svc.ServiceKind == structs.ServiceKindTypical || (svc.ServiceKind == structs.ServiceKindIngressGateway && downstreams) { - return true - } - return false - }, target.WithWildcardNamespace()) + // TODO(tproxy): One remaining improvement is that this includes non-Connect services (typical services without a proxy) + // Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy. + // Maybe narrow serviceNamesOfKindTxn to services represented by proxies? (ingress, sidecar-proxy, terminating) + index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical) if err != nil { - return index, nil, fmt.Errorf("failed to fetch catalog service list: %v", err) + return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) } if index > maxIdx { maxIdx = index } + if downstreams { + // Ingress gateways can only ever be downstreams, since mesh services don't dial them. + index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway) + if err != nil { + return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) + } + if index > maxIdx { + maxIdx = index + } + services = append(services, ingress...) 
+ } + // When checking authorization to upstreams, the match type for the decision is `destination` because we are deciding // if upstream candidates are covered by intentions that have the target service as a source. // The reverse is true for downstreams. @@ -1032,11 +1025,13 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, if downstreams { decisionMatchType = structs.IntentionMatchSource } - result := make([]ServiceWithDecision, 0, len(allServices)) - for _, candidate := range allServices { + result := make([]ServiceWithDecision, 0, len(services)) + for _, svc := range services { + candidate := svc.Service if candidate.Name == structs.ConsulServiceName { continue } + opts := IntentionDecisionOpts{ Target: candidate.Name, Namespace: candidate.NamespaceOrDefault(), diff --git a/agent/consul/state/intention_oss.go b/agent/consul/state/intention_oss.go index a06949d42..e6872ab5b 100644 --- a/agent/consul/state/intention_oss.go +++ b/agent/consul/state/intention_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/kvs.go b/agent/consul/state/kvs.go index db7d731e3..34639ace0 100644 --- a/agent/consul/state/kvs.go +++ b/agent/consul/state/kvs.go @@ -9,20 +9,26 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -// kvsTableSchema returns a new table schema used for storing key/value data for -// Consul's kv store. 
+const ( + tableKVs = "kvs" + tableTombstones = "tombstones" + + indexSession = "session" +) + +// kvsTableSchema returns a new table schema used for storing structs.DirEntry func kvsTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "kvs", + Name: tableKVs, Indexes: map[string]*memdb.IndexSchema{ - "id": { - Name: "id", + indexID: { + Name: indexID, AllowMissing: false, Unique: true, Indexer: kvsIndexer(), }, - "session": { - Name: "session", + indexSession: { + Name: indexSession, AllowMissing: true, Unique: false, Indexer: &memdb.UUIDFieldIndex{ @@ -33,14 +39,31 @@ func kvsTableSchema() *memdb.TableSchema { } } +// indexFromIDValue creates an index key from any struct that implements singleValueID +func indexFromIDValue(raw interface{}) ([]byte, error) { + e, ok := raw.(singleValueID) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement singleValueID", raw) + } + + v := e.IDValue() + if v == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(v) + return b.Bytes(), nil +} + // tombstonesTableSchema returns a new table schema used for storing tombstones // during KV delete operations to prevent the index from sliding backwards. func tombstonesTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "tombstones", + Name: tableTombstones, Indexes: map[string]*memdb.IndexSchema{ - "id": { - Name: "id", + indexID: { + Name: indexID, AllowMissing: false, Unique: true, Indexer: kvsIndexer(), @@ -51,11 +74,7 @@ func tombstonesTableSchema() *memdb.TableSchema { // KVs is used to pull the full list of KVS entries for use during snapshots. func (s *Snapshot) KVs() (memdb.ResultIterator, error) { - iter, err := s.tx.Get("kvs", "id_prefix") - if err != nil { - return nil, err - } - return iter, nil + return s.tx.Get(tableKVs, indexID+"_prefix") } // Tombstones is used to pull all the tombstones from the graveyard. 
@@ -97,6 +116,7 @@ func (s *Store) ReapTombstones(idx uint64, index uint64) error { // KVSSet is used to store a key/value pair. func (s *Store) KVSSet(idx uint64, entry *structs.DirEntry) error { + entry.EnterpriseMeta.Normalize() tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -114,8 +134,7 @@ func (s *Store) KVSSet(idx uint64, entry *structs.DirEntry) error { // session (should be validated before calling this). Otherwise, we will keep // whatever the existing session is. func kvsSetTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry, updateSession bool) error { - // Retrieve an existing KV pair - existingNode, err := firstWithTxn(tx, "kvs", "id", entry.Key, &entry.EnterpriseMeta) + existingNode, err := tx.First(tableKVs, indexID, entry) if err != nil { return fmt.Errorf("failed kvs lookup: %s", err) } @@ -161,19 +180,23 @@ func (s *Store) KVSGet(ws memdb.WatchSet, key string, entMeta *structs.Enterpris tx := s.db.Txn(false) defer tx.Abort() - return kvsGetTxn(tx, ws, key, entMeta) + // TODO: accept non-pointer entMeta + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + + return kvsGetTxn(tx, ws, key, *entMeta) } // kvsGetTxn is the inner method that gets a KVS entry inside an existing // transaction. func kvsGetTxn(tx ReadTxn, - ws memdb.WatchSet, key string, entMeta *structs.EnterpriseMeta) (uint64, *structs.DirEntry, error) { + ws memdb.WatchSet, key string, entMeta structs.EnterpriseMeta) (uint64, *structs.DirEntry, error) { // Get the table index. idx := kvsMaxIndex(tx, entMeta) - // Retrieve the key. 
- watchCh, entry, err := firstWatchWithTxn(tx, "kvs", "id", key, entMeta) + watchCh, entry, err := tx.FirstWatch(tableKVs, indexID, Query{Value: key, EnterpriseMeta: entMeta}) if err != nil { return 0, nil, fmt.Errorf("failed kvs lookup: %s", err) } @@ -194,13 +217,18 @@ func (s *Store) KVSList(ws memdb.WatchSet, tx := s.db.Txn(false) defer tx.Abort() - return s.kvsListTxn(tx, ws, prefix, entMeta) + // TODO: accept non-pointer entMeta + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + + return s.kvsListTxn(tx, ws, prefix, *entMeta) } // kvsListTxn is the inner method that gets a list of KVS entries matching a // prefix. func (s *Store) kvsListTxn(tx ReadTxn, - ws memdb.WatchSet, prefix string, entMeta *structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { + ws memdb.WatchSet, prefix string, entMeta structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { // Get the table indexes. idx := kvsMaxIndex(tx, entMeta) @@ -213,7 +241,7 @@ func (s *Store) kvsListTxn(tx ReadTxn, // Check for the highest index in the graveyard. If the prefix is empty // then just use the full table indexes since we are listing everything. if prefix != "" { - gindex, err := s.kvsGraveyard.GetMaxIndexTxn(tx, prefix, entMeta) + gindex, err := s.kvsGraveyard.GetMaxIndexTxn(tx, prefix, &entMeta) if err != nil { return 0, nil, fmt.Errorf("failed graveyard lookup: %s", err) } @@ -249,8 +277,13 @@ func (s *Store) KVSDelete(idx uint64, key string, entMeta *structs.EnterpriseMet // kvsDeleteTxn is the inner method used to perform the actual deletion // of a key/value pair within an existing transaction. func (s *Store) kvsDeleteTxn(tx WriteTxn, idx uint64, key string, entMeta *structs.EnterpriseMeta) error { + + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + // Look up the entry in the state store. 
- entry, err := firstWithTxn(tx, "kvs", "id", key, entMeta) + entry, err := tx.First(tableKVs, indexID, Query{Value: key, EnterpriseMeta: *entMeta}) if err != nil { return fmt.Errorf("failed kvs lookup: %s", err) } @@ -286,8 +319,10 @@ func (s *Store) KVSDeleteCAS(idx, cidx uint64, key string, entMeta *structs.Ente // kvsDeleteCASTxn is the inner method that does a CAS delete within an existing // transaction. func (s *Store) kvsDeleteCASTxn(tx WriteTxn, idx, cidx uint64, key string, entMeta *structs.EnterpriseMeta) (bool, error) { - // Retrieve the existing kvs entry, if any exists. - entry, err := firstWithTxn(tx, "kvs", "id", key, entMeta) + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + entry, err := tx.First(tableKVs, indexID, Query{Value: key, EnterpriseMeta: *entMeta}) if err != nil { return false, fmt.Errorf("failed kvs lookup: %s", err) } @@ -327,8 +362,7 @@ func (s *Store) KVSSetCAS(idx uint64, entry *structs.DirEntry) (bool, error) { // kvsSetCASTxn is the inner method used to do a CAS inside an existing // transaction. func kvsSetCASTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error) { - // Retrieve the existing entry. - existing, err := firstWithTxn(tx, "kvs", "id", entry.Key, &entry.EnterpriseMeta) + existing, err := tx.First(tableKVs, indexID, entry) if err != nil { return false, fmt.Errorf("failed kvs lookup: %s", err) } @@ -397,7 +431,7 @@ func kvsLockTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error) } // Verify that the session exists. 
- sess, err := firstWithTxn(tx, "sessions", "id", entry.Session, &entry.EnterpriseMeta) + sess, err := tx.First(tableSessions, indexID, Query{Value: entry.Session, EnterpriseMeta: entry.EnterpriseMeta}) if err != nil { return false, fmt.Errorf("failed session lookup: %s", err) } @@ -405,8 +439,7 @@ func kvsLockTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error) return false, fmt.Errorf("invalid session %#v", entry.Session) } - // Retrieve the existing entry. - existing, err := firstWithTxn(tx, "kvs", "id", entry.Key, &entry.EnterpriseMeta) + existing, err := tx.First(tableKVs, indexID, entry) if err != nil { return false, fmt.Errorf("failed kvs lookup: %s", err) } @@ -462,8 +495,7 @@ func kvsUnlockTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error return false, fmt.Errorf("missing session") } - // Retrieve the existing entry. - existing, err := firstWithTxn(tx, "kvs", "id", entry.Key, &entry.EnterpriseMeta) + existing, err := tx.First(tableKVs, indexID, entry) if err != nil { return false, fmt.Errorf("failed kvs lookup: %s", err) } @@ -497,7 +529,11 @@ func kvsUnlockTxn(tx WriteTxn, idx uint64, entry *structs.DirEntry) (bool, error func kvsCheckSessionTxn(tx WriteTxn, key string, session string, entMeta *structs.EnterpriseMeta) (*structs.DirEntry, error) { - entry, err := firstWithTxn(tx, "kvs", "id", key, entMeta) + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + + entry, err := tx.First(tableKVs, indexID, Query{Value: key, EnterpriseMeta: *entMeta}) if err != nil { return nil, fmt.Errorf("failed kvs lookup: %s", err) } @@ -516,9 +552,9 @@ func kvsCheckSessionTxn(tx WriteTxn, // kvsCheckIndexTxn checks to see if the given modify index matches the current // entry for a key. 
func kvsCheckIndexTxn(tx WriteTxn, - key string, cidx uint64, entMeta *structs.EnterpriseMeta) (*structs.DirEntry, error) { + key string, cidx uint64, entMeta structs.EnterpriseMeta) (*structs.DirEntry, error) { - entry, err := firstWithTxn(tx, "kvs", "id", key, entMeta) + entry, err := tx.First(tableKVs, indexID, Query{Value: key, EnterpriseMeta: entMeta}) if err != nil { return nil, fmt.Errorf("failed kvs lookup: %s", err) } diff --git a/agent/consul/state/kvs_oss.go b/agent/consul/state/kvs_oss.go index 25f427e1f..54387e0d9 100644 --- a/agent/consul/state/kvs_oss.go +++ b/agent/consul/state/kvs_oss.go @@ -1,8 +1,10 @@ +//go:build !consulent // +build !consulent package state import ( + "bytes" "fmt" "github.com/hashicorp/go-memdb" @@ -10,35 +12,55 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -func kvsIndexer() *memdb.StringFieldIndex { - return &memdb.StringFieldIndex{ - Field: "Key", - Lowercase: false, +func kvsIndexer() indexerSingleWithPrefix { + return indexerSingleWithPrefix{ + readIndex: readIndex(indexFromIDValue), + writeIndex: writeIndex(indexFromIDValue), + prefixIndex: prefixIndex(prefixIndexForIDValue), } } +func prefixIndexForIDValue(arg interface{}) ([]byte, error) { + switch v := arg.(type) { + // DeletePrefix always uses a string, pass it along unmodified + case string: + return []byte(v), nil + case structs.EnterpriseMeta: + return nil, nil + case singleValueID: + var b indexBuilder + if v.IDValue() != "" { + // Omit null terminator, because we want to prefix match keys + b.String(v.IDValue()) + } + prefix := bytes.Trim(b.Bytes(), "\x00") + return prefix, nil + } + return nil, fmt.Errorf("unexpected type %T for singleValueID prefix index", arg) +} + func insertKVTxn(tx WriteTxn, entry *structs.DirEntry, updateMax bool, _ bool) error { - if err := tx.Insert("kvs", entry); err != nil { + if err := tx.Insert(tableKVs, entry); err != nil { return err } if updateMax { - if err := indexUpdateMaxTxn(tx, entry.ModifyIndex, "kvs"); err != 
nil { + if err := indexUpdateMaxTxn(tx, entry.ModifyIndex, tableKVs); err != nil { return fmt.Errorf("failed updating kvs index: %v", err) } } else { - if err := tx.Insert(tableIndex, &IndexEntry{"kvs", entry.ModifyIndex}); err != nil { + if err := tx.Insert(tableIndex, &IndexEntry{tableKVs, entry.ModifyIndex}); err != nil { return fmt.Errorf("failed updating kvs index: %s", err) } } return nil } -func kvsListEntriesTxn(tx ReadTxn, ws memdb.WatchSet, prefix string, entMeta *structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { +func kvsListEntriesTxn(tx ReadTxn, ws memdb.WatchSet, prefix string, entMeta structs.EnterpriseMeta) (uint64, structs.DirEntries, error) { var ents structs.DirEntries var lindex uint64 - entries, err := tx.Get("kvs", "id_prefix", prefix) + entries, err := tx.Get(tableKVs, indexID+"_prefix", prefix) if err != nil { return 0, nil, fmt.Errorf("failed kvs lookup: %s", err) } @@ -59,7 +81,7 @@ func kvsListEntriesTxn(tx ReadTxn, ws memdb.WatchSet, prefix string, entMeta *st // existing transaction. func (s *Store) kvsDeleteTreeTxn(tx WriteTxn, idx uint64, prefix string, entMeta *structs.EnterpriseMeta) error { // For prefix deletes, only insert one tombstone and delete the entire subtree - deleted, err := tx.DeletePrefix("kvs", "id_prefix", prefix) + deleted, err := tx.DeletePrefix(tableKVs, indexID+"_prefix", prefix) if err != nil { return fmt.Errorf("failed recursive deleting kvs entry: %s", err) } @@ -78,19 +100,27 @@ func (s *Store) kvsDeleteTreeTxn(tx WriteTxn, idx uint64, prefix string, entMeta return nil } -func kvsMaxIndex(tx ReadTxn, entMeta *structs.EnterpriseMeta) uint64 { +func kvsMaxIndex(tx ReadTxn, entMeta structs.EnterpriseMeta) uint64 { return maxIndexTxn(tx, "kvs", "tombstones") } func kvsDeleteWithEntry(tx WriteTxn, entry *structs.DirEntry, idx uint64) error { // Delete the entry and update the index. 
- if err := tx.Delete("kvs", entry); err != nil { + if err := tx.Delete(tableKVs, entry); err != nil { return fmt.Errorf("failed deleting kvs entry: %s", err) } - if err := tx.Insert(tableIndex, &IndexEntry{"kvs", idx}); err != nil { + if err := tx.Insert(tableIndex, &IndexEntry{tableKVs, idx}); err != nil { return fmt.Errorf("failed updating kvs index: %s", err) } return nil } + +func partitionedIndexEntryName(entry string, _ string) string { + return entry +} + +func partitionedAndNamespacedIndexEntryName(entry string, _ *structs.EnterpriseMeta) string { + return entry +} diff --git a/agent/consul/state/kvs_oss_test.go b/agent/consul/state/kvs_oss_test.go new file mode 100644 index 000000000..4ec7ac7a3 --- /dev/null +++ b/agent/consul/state/kvs_oss_test.go @@ -0,0 +1,64 @@ +//go:build !consulent +// +build !consulent + +package state + +import "github.com/hashicorp/consul/agent/structs" + +func testIndexerTableKVs() map[string]indexerTestCase { + return map[string]indexerTestCase{ + indexID: { + read: indexValue{ + source: Query{Value: "TheKey"}, + expected: []byte("TheKey\x00"), + }, + write: indexValue{ + source: &structs.DirEntry{Key: "TheKey"}, + expected: []byte("TheKey\x00"), + }, + prefix: []indexValue{ + { + source: "indexString", + expected: []byte("indexString"), + }, + { + source: structs.EnterpriseMeta{}, + expected: nil, + }, + { + source: Query{Value: "TheKey"}, + expected: []byte("TheKey"), + }, + }, + }, + } +} + +func testIndexerTableTombstones() map[string]indexerTestCase { + return map[string]indexerTestCase{ + indexID: { + read: indexValue{ + source: Query{Value: "TheKey"}, + expected: []byte("TheKey\x00"), + }, + write: indexValue{ + source: &Tombstone{Key: "TheKey"}, + expected: []byte("TheKey\x00"), + }, + prefix: []indexValue{ + { + source: "indexString", + expected: []byte("indexString"), + }, + { + source: structs.EnterpriseMeta{}, + expected: nil, + }, + { + source: Query{Value: "TheKey"}, + expected: []byte("TheKey"), + }, + }, + }, + 
} +} diff --git a/agent/consul/state/kvs_test.go b/agent/consul/state/kvs_test.go index 5538f24d9..842e06ec2 100644 --- a/agent/consul/state/kvs_test.go +++ b/agent/consul/state/kvs_test.go @@ -500,7 +500,8 @@ func TestStateStore_KVSDelete(t *testing.T) { // The entry was removed from the state store tx := s.db.Txn(false) defer tx.Abort() - e, err := firstWithTxn(tx, "kvs", "id", "foo", nil) + + e, err := tx.First(tableKVs, indexID, Query{Value: "foo"}) if err != nil { t.Fatalf("err: %s", err) } @@ -509,7 +510,7 @@ func TestStateStore_KVSDelete(t *testing.T) { } // Try fetching the other keys to ensure they still exist - e, err = firstWithTxn(tx, "kvs", "id", "foo/bar", nil) + e, err = tx.First(tableKVs, indexID, Query{Value: "foo/bar"}) if err != nil { t.Fatalf("err: %s", err) } @@ -518,7 +519,7 @@ func TestStateStore_KVSDelete(t *testing.T) { } // Check that the index table was updated - if idx := s.maxIndex("kvs"); idx != 3 { + if idx := s.maxIndex(partitionedIndexEntryName(tableKVs, "default")); idx != 3 { t.Fatalf("bad index: %d", idx) } @@ -550,7 +551,7 @@ func TestStateStore_KVSDelete(t *testing.T) { if err := s.KVSDelete(5, "foo", nil); err != nil { t.Fatalf("err: %s", err) } - if idx := s.maxIndex("kvs"); idx != 3 { + if idx := s.maxIndex(partitionedIndexEntryName(tableKVs, "default")); idx != 3 { t.Fatalf("bad index: %d", idx) } } @@ -633,7 +634,7 @@ func TestStateStore_KVSDeleteCAS(t *testing.T) { if !ok || err != nil { t.Fatalf("expected (true, nil), got: (%v, %#v)", ok, err) } - if idx := s.maxIndex("kvs"); idx != 5 { + if idx := s.maxIndex(partitionedIndexEntryName(tableKVs, "default")); idx != 5 { t.Fatalf("bad index: %d", idx) } } @@ -658,7 +659,7 @@ func TestStateStore_KVSSetCAS(t *testing.T) { // Check that nothing was actually stored tx := s.db.Txn(false) - if e, err := firstWithTxn(tx, "kvs", "id", "foo", nil); e != nil || err != nil { + if e, err := tx.First(tableKVs, indexID, Query{Value: "foo"}); e != nil || err != nil { t.Fatalf("expected 
(nil, nil), got: (%#v, %#v)", e, err) } tx.Abort() @@ -852,7 +853,7 @@ func TestStateStore_KVSDeleteTree(t *testing.T) { if err := s.KVSDeleteTree(9, "bar", nil); err != nil { t.Fatalf("err: %s", err) } - if idx := s.maxIndex("kvs"); idx != 4 { + if idx := s.maxIndex(partitionedIndexEntryName(tableKVs, "default")); idx != 4 { t.Fatalf("bad index: %d", idx) } @@ -865,7 +866,7 @@ func TestStateStore_KVSDeleteTree(t *testing.T) { tx := s.db.Txn(false) defer tx.Abort() - entries, err := tx.Get("kvs", "id") + entries, err := tx.Get(tableKVs, indexID) if err != nil { t.Fatalf("err: %s", err) } @@ -883,7 +884,7 @@ func TestStateStore_KVSDeleteTree(t *testing.T) { } // Index should be updated if modifications are made - if idx := s.maxIndex("kvs"); idx != 5 { + if idx := s.maxIndex(partitionedIndexEntryName(tableKVs, "default")); idx != 5 { t.Fatalf("bad index: %d", idx) } @@ -1358,7 +1359,7 @@ func TestStateStore_KVS_Snapshot_Restore(t *testing.T) { } // Check that the index was updated. - if idx := s.maxIndex("kvs"); idx != 7 { + if idx := s.maxIndex(partitionedIndexEntryName(tableKVs, "default")); idx != 7 { t.Fatalf("bad index: %d", idx) } }() diff --git a/agent/consul/state/operations_oss.go b/agent/consul/state/operations_oss.go index a3a8f396d..7be71732d 100644 --- a/agent/consul/state/operations_oss.go +++ b/agent/consul/state/operations_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state @@ -8,24 +9,6 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -func firstWithTxn(tx ReadTxn, - table, index, idxVal string, entMeta *structs.EnterpriseMeta) (interface{}, error) { - - return tx.First(table, index, idxVal) -} - -func firstWatchWithTxn(tx ReadTxn, - table, index, idxVal string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { - - return tx.FirstWatch(table, index, idxVal) -} - -func getWithTxn(tx ReadTxn, - table, index, idxVal string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { - - 
return tx.Get(table, index, idxVal) -} - func getCompoundWithTxn(tx ReadTxn, table, index string, _ *structs.EnterpriseMeta, idxVals ...interface{}) (memdb.ResultIterator, error) { diff --git a/agent/consul/state/prepared_query.go b/agent/consul/state/prepared_query.go index 8d2364511..bf4aecb0f 100644 --- a/agent/consul/state/prepared_query.go +++ b/agent/consul/state/prepared_query.go @@ -206,7 +206,7 @@ func preparedQuerySetTxn(tx WriteTxn, idx uint64, query *structs.PreparedQuery) // Verify that the session exists. if query.Session != "" { - sess, err := firstWithTxn(tx, "sessions", "id", query.Session, nil) + sess, err := tx.First(tableSessions, indexID, Query{Value: query.Session, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()}) if err != nil { return fmt.Errorf("invalid session: %v", err) } diff --git a/agent/consul/state/query.go b/agent/consul/state/query.go index 799a8f019..7e0838448 100644 --- a/agent/consul/state/query.go +++ b/agent/consul/state/query.go @@ -15,6 +15,10 @@ type Query struct { structs.EnterpriseMeta } +func (q Query) IDValue() string { + return q.Value +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. func (q Query) NamespaceOrDefault() string { @@ -27,6 +31,27 @@ func (q Query) PartitionOrDefault() string { return q.EnterpriseMeta.PartitionOrDefault() } +type MultiQuery struct { + Value []string + structs.EnterpriseMeta +} + +func (q MultiQuery) IDValue() []string { + return q.Value +} + +// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (q MultiQuery) NamespaceOrDefault() string { + return q.EnterpriseMeta.NamespaceOrDefault() +} + +// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. 
+func (q MultiQuery) PartitionOrDefault() string { + return q.EnterpriseMeta.PartitionOrDefault() +} + // indexFromQuery builds an index key where Query.Value is lowercase, and is // a required value. func indexFromQuery(arg interface{}) ([]byte, error) { diff --git a/agent/consul/state/query_oss.go b/agent/consul/state/query_oss.go index 0b36a461c..04fed3a6b 100644 --- a/agent/consul/state/query_oss.go +++ b/agent/consul/state/query_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state @@ -17,6 +18,9 @@ func prefixIndexFromQuery(arg interface{}) ([]byte, error) { case structs.EnterpriseMeta: return nil, nil case Query: + if v.Value == "" { + return nil, nil + } b.String(strings.ToLower(v.Value)) return b.Bytes(), nil } @@ -28,21 +32,6 @@ func prefixIndexFromQueryNoNamespace(arg interface{}) ([]byte, error) { return prefixIndexFromQuery(arg) } -func prefixIndexFromServiceNameAsString(arg interface{}) ([]byte, error) { - var b indexBuilder - switch v := arg.(type) { - case *structs.EnterpriseMeta: - return nil, nil - case structs.EnterpriseMeta: - return nil, nil - case structs.ServiceName: - b.String(strings.ToLower(v.String())) - return b.Bytes(), nil - } - - return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) -} - // indexFromAuthMethodQuery builds an index key where Query.Value is lowercase, and is // a required value. 
func indexFromAuthMethodQuery(arg interface{}) ([]byte, error) { diff --git a/agent/consul/state/schema.go b/agent/consul/state/schema.go index bca4eec01..75a2ffa74 100644 --- a/agent/consul/state/schema.go +++ b/agent/consul/state/schema.go @@ -32,12 +32,15 @@ func newDBSchema() *memdb.DBSchema { preparedQueriesTableSchema, rolesTableSchema, servicesTableSchema, + serviceVirtualIPTableSchema, sessionChecksTableSchema, sessionsTableSchema, systemMetadataTableSchema, tokensTableSchema, tombstonesTableSchema, usageTableSchema, + freeVirtualIPTableSchema, + kindServiceNameTableSchema, ) withEnterpriseSchema(db) return db diff --git a/agent/consul/state/schema_oss_test.go b/agent/consul/state/schema_oss_test.go index 69129472b..77581d0d9 100644 --- a/agent/consul/state/schema_oss_test.go +++ b/agent/consul/state/schema_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/schema_test.go b/agent/consul/state/schema_test.go index 923040814..7ef17c8fd 100644 --- a/agent/consul/state/schema_test.go +++ b/agent/consul/state/schema_test.go @@ -43,12 +43,17 @@ func TestNewDBSchema_Indexers(t *testing.T) { tableACLRoles: testIndexerTableACLRoles, tableACLTokens: testIndexerTableACLTokens, // catalog - tableChecks: testIndexerTableChecks, - tableServices: testIndexerTableServices, - tableNodes: testIndexerTableNodes, - tableCoordinates: testIndexerTableCoordinates, - tableMeshTopology: testIndexerTableMeshTopology, - tableGatewayServices: testIndexerTableGatewayServices, + tableChecks: testIndexerTableChecks, + tableServices: testIndexerTableServices, + tableNodes: testIndexerTableNodes, + tableCoordinates: testIndexerTableCoordinates, + tableMeshTopology: testIndexerTableMeshTopology, + tableGatewayServices: testIndexerTableGatewayServices, + tableServiceVirtualIPs: testIndexerTableServiceVirtualIPs, + tableKindServiceNames: testIndexerTableKindServiceNames, + // KV + tableKVs: testIndexerTableKVs, + 
tableTombstones: testIndexerTableTombstones, // config tableConfigEntries: testIndexerTableConfigEntries, } diff --git a/agent/consul/state/session.go b/agent/consul/state/session.go index 00f5a6542..876e67f50 100644 --- a/agent/consul/state/session.go +++ b/agent/consul/state/session.go @@ -11,20 +11,43 @@ import ( "github.com/hashicorp/consul/agent/structs" ) +const ( + tableSessions = "sessions" + tableSessionChecks = "session_checks" + + indexNodeCheck = "node_check" +) + +func indexFromSession(raw interface{}) ([]byte, error) { + e, ok := raw.(*structs.Session) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement *structs.Session", raw) + } + + v := strings.ToLower(e.ID) + if v == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(v) + return b.Bytes(), nil +} + // sessionsTableSchema returns a new table schema used for storing session // information. func sessionsTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "sessions", + Name: tableSessions, Indexes: map[string]*memdb.IndexSchema{ - "id": { - Name: "id", + indexID: { + Name: indexID, AllowMissing: false, Unique: true, Indexer: sessionIndexer(), }, - "node": { - Name: "node", + indexNode: { + Name: indexNode, AllowMissing: false, Unique: false, Indexer: nodeSessionsIndexer(), @@ -37,43 +60,93 @@ func sessionsTableSchema() *memdb.TableSchema { // checks. 
func sessionChecksTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "session_checks", + Name: tableSessionChecks, Indexes: map[string]*memdb.IndexSchema{ - "id": { - Name: "id", + indexID: { + Name: indexID, AllowMissing: false, Unique: true, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &CheckIDIndex{}, - &memdb.UUIDFieldIndex{ - Field: "Session", - }, - }, - }, + Indexer: idCheckIndexer(), }, - "node_check": { - Name: "node_check", + indexNodeCheck: { + Name: indexNodeCheck, AllowMissing: false, Unique: false, Indexer: nodeChecksIndexer(), }, - "session": { - Name: "session", + indexSession: { + Name: indexSession, AllowMissing: false, Unique: false, - Indexer: &memdb.UUIDFieldIndex{ - Field: "Session", - }, + Indexer: sessionCheckIndexer(), }, }, } } +// indexNodeFromSession creates an index key from *structs.Session +func indexNodeFromSession(raw interface{}) ([]byte, error) { + e, ok := raw.(*structs.Session) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement *structs.Session", raw) + } + + v := strings.ToLower(e.Node) + if v == "" { + return nil, errMissingValueForIndex + } + var b indexBuilder + + b.String(v) + return b.Bytes(), nil +} + +// indexFromNodeCheckIDSession creates an index key from sessionCheck +func indexFromNodeCheckIDSession(raw interface{}) ([]byte, error) { + e, ok := raw.(*sessionCheck) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement sessionCheck", raw) + } + + var b indexBuilder + v := strings.ToLower(e.Node) + if v == "" { + return nil, errMissingValueForIndex + } + b.String(v) + + v = strings.ToLower(string(e.CheckID.ID)) + if v == "" { + return nil, errMissingValueForIndex + } + b.String(v) + + v = strings.ToLower(e.Session) + if v == "" { + return nil, errMissingValueForIndex + } + b.String(v) + + return b.Bytes(), nil +} + +// indexSessionCheckFromSession creates an index key 
from sessionCheck +func indexSessionCheckFromSession(raw interface{}) ([]byte, error) { + e, ok := raw.(*sessionCheck) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement *sessionCheck", raw) + } + + var b indexBuilder + v := strings.ToLower(e.Session) + if v == "" { + return nil, errMissingValueForIndex + } + b.String(v) + + return b.Bytes(), nil +} + type CheckIDIndex struct { } @@ -132,7 +205,7 @@ func (index *CheckIDIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { // Sessions is used to pull the full list of sessions for use during snapshots. func (s *Snapshot) Sessions() (memdb.ResultIterator, error) { - iter, err := s.tx.Get("sessions", "id") + iter, err := s.tx.Get(tableSessions, indexID) if err != nil { return nil, err } @@ -195,7 +268,7 @@ func sessionCreateTxn(tx WriteTxn, idx uint64, sess *structs.Session) error { sess.ModifyIndex = idx // Check that the node exists - node, err := tx.First(tableNodes, indexID, Query{Value: sess.Node}) + node, err := tx.First(tableNodes, indexID, Query{Value: sess.Node, EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(sess.PartitionOrDefault())}) if err != nil { return fmt.Errorf("failed node lookup: %s", err) } @@ -223,11 +296,14 @@ func (s *Store) SessionGet(ws memdb.WatchSet, tx := s.db.Txn(false) defer tx.Abort() - // Get the table index. - idx := sessionMaxIndex(tx, entMeta) + idx := maxIndexTxnSessions(tx, entMeta) // Look up the session by its ID - watchCh, session, err := firstWatchWithTxn(tx, "sessions", "id", sessionID, entMeta) + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + watchCh, session, err := tx.FirstWatch(tableSessions, indexID, Query{Value: sessionID, EnterpriseMeta: *entMeta}) + if err != nil { return 0, nil, fmt.Errorf("failed session lookup: %s", err) } @@ -239,29 +315,6 @@ func (s *Store) SessionGet(ws memdb.WatchSet, return idx, nil, nil } -// SessionList returns a slice containing all of the active sessions. 
-func (s *Store) SessionList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Sessions, error) { - tx := s.db.Txn(false) - defer tx.Abort() - - // Get the table index. - idx := sessionMaxIndex(tx, entMeta) - - // Query all of the active sessions. - sessions, err := getWithTxn(tx, "sessions", "id_prefix", "", entMeta) - if err != nil { - return 0, nil, fmt.Errorf("failed session lookup: %s", err) - } - ws.Add(sessions.WatchCh()) - - // Go over the sessions and create a slice of them. - var result structs.Sessions - for session := sessions.Next(); session != nil; session = sessions.Next() { - result = append(result, session.(*structs.Session)) - } - return idx, result, nil -} - // NodeSessions returns a set of active sessions associated // with the given node ID. The returned index is the highest // index seen from the result set. @@ -270,7 +323,7 @@ func (s *Store) NodeSessions(ws memdb.WatchSet, nodeID string, entMeta *structs. defer tx.Abort() // Get the table index. - idx := sessionMaxIndex(tx, entMeta) + idx := maxIndexTxnSessions(tx, entMeta) // Get all of the sessions which belong to the node result, err := nodeSessionsTxn(tx, ws, nodeID, entMeta) @@ -299,7 +352,10 @@ func (s *Store) SessionDestroy(idx uint64, sessionID string, entMeta *structs.En // session deletion and handle session invalidation, etc. func (s *Store) deleteSessionTxn(tx WriteTxn, idx uint64, sessionID string, entMeta *structs.EnterpriseMeta) error { // Look up the session. 
- sess, err := firstWithTxn(tx, "sessions", "id", sessionID, entMeta) + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + sess, err := tx.First(tableSessions, indexID, Query{Value: sessionID, EnterpriseMeta: *entMeta}) if err != nil { return fmt.Errorf("failed session lookup: %s", err) } @@ -324,7 +380,7 @@ func (s *Store) deleteSessionTxn(tx WriteTxn, idx uint64, sessionID string, entM now := time.Now() // Get an iterator over all of the keys with the given session. - entries, err := tx.Get("kvs", "session", sessionID) + entries, err := tx.Get(tableKVs, indexSession, sessionID) if err != nil { return fmt.Errorf("failed kvs lookup: %s", err) } @@ -367,8 +423,11 @@ func (s *Store) deleteSessionTxn(tx WriteTxn, idx uint64, sessionID string, entM return fmt.Errorf("unknown session behavior %#v", session.Behavior) } + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } // Delete any check mappings. - mappings, err := tx.Get("session_checks", "session", sessionID) + mappings, err := tx.Get(tableSessionChecks, indexSession, Query{Value: sessionID, EnterpriseMeta: *entMeta}) if err != nil { return fmt.Errorf("failed session checks lookup: %s", err) } @@ -380,7 +439,7 @@ func (s *Store) deleteSessionTxn(tx WriteTxn, idx uint64, sessionID string, entM // Do the delete in a separate loop so we don't trash the iterator. 
for _, obj := range objs { - if err := tx.Delete("session_checks", obj); err != nil { + if err := tx.Delete(tableSessionChecks, obj); err != nil { return fmt.Errorf("failed deleting session check: %s", err) } } diff --git a/agent/consul/state/session_oss.go b/agent/consul/state/session_oss.go index 16eb026dd..d313fb5f9 100644 --- a/agent/consul/state/session_oss.go +++ b/agent/consul/state/session_oss.go @@ -1,9 +1,11 @@ +//go:build !consulent // +build !consulent package state import ( "fmt" + "strings" "github.com/hashicorp/go-memdb" @@ -11,33 +13,66 @@ import ( "github.com/hashicorp/consul/api" ) -func sessionIndexer() *memdb.UUIDFieldIndex { - return &memdb.UUIDFieldIndex{ - Field: "ID", +func sessionIndexer() indexerSingleWithPrefix { + return indexerSingleWithPrefix{ + readIndex: readIndex(indexFromQuery), + writeIndex: writeIndex(indexFromSession), + prefixIndex: prefixIndex(prefixIndexFromQuery), } } -func nodeSessionsIndexer() *memdb.StringFieldIndex { - return &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, +func nodeSessionsIndexer() indexerSingle { + return indexerSingle{ + readIndex: readIndex(indexFromIDValueLowerCase), + writeIndex: writeIndex(indexNodeFromSession), } } -func nodeChecksIndexer() *memdb.CompoundIndex { - return &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &CheckIDIndex{}, - }, +func idCheckIndexer() indexerSingle { + return indexerSingle{ + readIndex: indexFromNodeCheckIDSession, + writeIndex: indexFromNodeCheckIDSession, } } +func sessionCheckIndexer() indexerSingle { + return indexerSingle{ + readIndex: indexFromQuery, + writeIndex: indexSessionCheckFromSession, + } +} + +func nodeChecksIndexer() indexerSingle { + return indexerSingle{ + readIndex: indexFromMultiValueID, + writeIndex: indexFromNodeCheckID, + } +} + +// indexFromNodeCheckID creates an index key from a sessionCheck structure +func indexFromNodeCheckID(raw interface{}) ([]byte, 
error) { + e, ok := raw.(*sessionCheck) + if !ok { + return nil, fmt.Errorf("unexpected type %T, does not implement *structs.Session", raw) + } + var b indexBuilder + v := strings.ToLower(e.Node) + if v == "" { + return nil, errMissingValueForIndex + } + b.String(v) + + v = strings.ToLower(string(e.CheckID.ID)) + if v == "" { + return nil, errMissingValueForIndex + } + b.String(v) + + return b.Bytes(), nil +} + func sessionDeleteWithSession(tx WriteTxn, session *structs.Session, idx uint64) error { - if err := tx.Delete("sessions", session); err != nil { + if err := tx.Delete(tableSessions, session); err != nil { return fmt.Errorf("failed deleting session: %s", err) } @@ -50,7 +85,7 @@ func sessionDeleteWithSession(tx WriteTxn, session *structs.Session, idx uint64) } func insertSessionTxn(tx WriteTxn, session *structs.Session, idx uint64, updateMax bool, _ bool) error { - if err := tx.Insert("sessions", session); err != nil { + if err := tx.Insert(tableSessions, session); err != nil { return err } @@ -61,7 +96,7 @@ func insertSessionTxn(tx WriteTxn, session *structs.Session, idx uint64, updateM CheckID: structs.CheckID{ID: checkID}, Session: session.ID, } - if err := tx.Insert("session_checks", mapping); err != nil { + if err := tx.Insert(tableSessionChecks, mapping); err != nil { return fmt.Errorf("failed inserting session check mapping: %s", err) } } @@ -81,14 +116,14 @@ func insertSessionTxn(tx WriteTxn, session *structs.Session, idx uint64, updateM return nil } -func allNodeSessionsTxn(tx ReadTxn, node string) (structs.Sessions, error) { +func allNodeSessionsTxn(tx ReadTxn, node string, _ string) (structs.Sessions, error) { return nodeSessionsTxn(tx, nil, node, nil) } func nodeSessionsTxn(tx ReadTxn, ws memdb.WatchSet, node string, entMeta *structs.EnterpriseMeta) (structs.Sessions, error) { - sessions, err := tx.Get("sessions", "node", node) + sessions, err := tx.Get(tableSessions, indexNode, Query{Value: node}) if err != nil { return nil, fmt.Errorf("failed 
session lookup: %s", err) } @@ -124,3 +159,35 @@ func validateSessionChecksTxn(tx ReadTxn, session *structs.Session) error { } return nil } + +// SessionList returns a slice containing all of the active sessions. +func (s *Store) SessionList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Sessions, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the table index. + idx := sessionMaxIndex(tx, entMeta) + + var result structs.Sessions + + // Query all of the active sessions. + sessions, err := tx.Get(tableSessions, indexID+"_prefix", Query{}) + if err != nil { + return 0, nil, fmt.Errorf("failed session lookup: %s", err) + } + ws.Add(sessions.WatchCh()) + // Go over the sessions and create a slice of them. + for session := sessions.Next(); session != nil; session = sessions.Next() { + result = append(result, session.(*structs.Session)) + } + + return idx, result, nil +} + +func maxIndexTxnSessions(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 { + return maxIndexTxn(tx, tableSessions) +} + +func (s *Store) SessionListAll(ws memdb.WatchSet) (uint64, structs.Sessions, error) { + return s.SessionList(ws, nil) +} diff --git a/agent/consul/state/session_test.go b/agent/consul/state/session_test.go index 1ef85e802..2e841500a 100644 --- a/agent/consul/state/session_test.go +++ b/agent/consul/state/session_test.go @@ -72,7 +72,7 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) { if err := s.SessionCreate(2, sess); err != nil { t.Fatalf("err: %s", err) } - if idx := s.maxIndex("sessions"); idx != 2 { + if idx := s.maxIndex(partitionedAndNamespacedIndexEntryName(tableSessions, &sess.EnterpriseMeta)); idx != 2 { t.Fatalf("bad index: %s", err) } if !watchFired(ws) { @@ -143,7 +143,7 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) { // Check mappings were inserted { - check, err := tx.First("session_checks", "session", sess.ID) + check, err := tx.First(tableSessionChecks, indexSession, Query{Value: sess.ID}) if err != 
nil { t.Fatalf("err: %s", err) } @@ -174,7 +174,7 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) { t.Fatalf("err: %s", err) } - checks, err := tx.Get("session_checks", "session", sess2.ID) + checks, err := tx.Get(tableSessionChecks, indexSession, Query{Value: sess2.ID}) if err != nil { t.Fatalf("err: %s", err) } @@ -379,13 +379,13 @@ func TestStateStore_SessionDestroy(t *testing.T) { } // Check that the index was updated - if idx := s.maxIndex("sessions"); idx != 3 { + if idx := s.maxIndex(partitionedAndNamespacedIndexEntryName(tableSessions, &sess.EnterpriseMeta)); idx != 3 { t.Fatalf("bad index: %d", idx) } // Make sure the session is really gone. tx := s.db.Txn(false) - sessions, err := tx.Get("sessions", "id") + sessions, err := tx.Get(tableSessions, indexID) if err != nil || sessions.Next() != nil { t.Fatalf("session should not exist") } @@ -509,7 +509,7 @@ func TestStateStore_Session_Snapshot_Restore(t *testing.T) { tx := s.db.Txn(false) defer tx.Abort() - check, err := tx.First("session_checks", "session", session1) + check, err := tx.First(tableSessionChecks, indexSession, Query{Value: session1}) if err != nil { t.Fatalf("err: %s", err) } @@ -730,7 +730,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) { // Manually make sure the session checks mapping is clear. 
tx := s.db.Txn(false) - mapping, err := tx.First("session_checks", "session", session.ID) + mapping, err := tx.First(tableSessionChecks, indexSession, Query{Value: session.ID}) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/state/state_store_oss_test.go b/agent/consul/state/state_store_oss_test.go index 794dfac43..99f6d2950 100644 --- a/agent/consul/state/state_store_oss_test.go +++ b/agent/consul/state/state_store_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/state/state_store_test.go b/agent/consul/state/state_store_test.go index 68e2e08fe..b617e0e46 100644 --- a/agent/consul/state/state_store_test.go +++ b/agent/consul/state/state_store_test.go @@ -170,14 +170,20 @@ func testRegisterIngressService(t *testing.T, s *Store, idx uint64, nodeID, serv t.Fatalf("bad service: %#v", result) } } - func testRegisterCheck(t *testing.T, s *Store, idx uint64, nodeID string, serviceID string, checkID types.CheckID, state string) { + testRegisterCheckWithPartition(t, s, idx, + nodeID, serviceID, checkID, state, "") +} + +func testRegisterCheckWithPartition(t *testing.T, s *Store, idx uint64, + nodeID string, serviceID string, checkID types.CheckID, state string, partition string) { chk := &structs.HealthCheck{ - Node: nodeID, - CheckID: checkID, - ServiceID: serviceID, - Status: state, + Node: nodeID, + CheckID: checkID, + ServiceID: serviceID, + Status: state, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(partition), } if err := s.EnsureCheck(idx, chk); err != nil { t.Fatalf("err: %s", err) @@ -185,7 +191,7 @@ func testRegisterCheck(t *testing.T, s *Store, idx uint64, tx := s.db.Txn(false) defer tx.Abort() - c, err := tx.First(tableChecks, indexID, NodeCheckQuery{Node: nodeID, CheckID: string(checkID)}) + c, err := tx.First(tableChecks, indexID, NodeCheckQuery{Node: nodeID, CheckID: string(checkID), EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(partition)}) if err 
!= nil { t.Fatalf("err: %s", err) } @@ -238,7 +244,8 @@ func testSetKey(t *testing.T, s *Store, idx uint64, key, value string, entMeta * tx := s.db.Txn(false) defer tx.Abort() - e, err := firstWithTxn(tx, "kvs", "id", key, entMeta) + + e, err := tx.First(tableKVs, indexID, entry) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/state/txn.go b/agent/consul/state/txn.go index 8caa4a9d7..4f44b56cc 100644 --- a/agent/consul/state/txn.go +++ b/agent/consul/state/txn.go @@ -55,14 +55,14 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx } case api.KVGet: - _, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, &op.DirEnt.EnterpriseMeta) + _, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta) if entry == nil && err == nil { err = fmt.Errorf("key %q doesn't exist", op.DirEnt.Key) } case api.KVGetTree: var entries structs.DirEntries - _, entries, err = s.kvsListTxn(tx, nil, op.DirEnt.Key, &op.DirEnt.EnterpriseMeta) + _, entries, err = s.kvsListTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta) if err == nil { results := make(structs.TxnResults, 0, len(entries)) for _, e := range entries { @@ -76,10 +76,10 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx entry, err = kvsCheckSessionTxn(tx, op.DirEnt.Key, op.DirEnt.Session, &op.DirEnt.EnterpriseMeta) case api.KVCheckIndex: - entry, err = kvsCheckIndexTxn(tx, op.DirEnt.Key, op.DirEnt.ModifyIndex, &op.DirEnt.EnterpriseMeta) + entry, err = kvsCheckIndexTxn(tx, op.DirEnt.Key, op.DirEnt.ModifyIndex, op.DirEnt.EnterpriseMeta) case api.KVCheckNotExists: - _, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, &op.DirEnt.EnterpriseMeta) + _, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta) if entry != nil && err == nil { err = fmt.Errorf("key %q exists", op.DirEnt.Key) } diff --git a/agent/consul/state/usage_oss.go b/agent/consul/state/usage_oss.go index cdefd9e55..c8975be3b 100644 --- 
a/agent/consul/state/usage_oss.go +++ b/agent/consul/state/usage_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package state diff --git a/agent/consul/status_endpoint_test.go b/agent/consul/status_endpoint_test.go index 1c4afa4ce..ee27b2d78 100644 --- a/agent/consul/status_endpoint_test.go +++ b/agent/consul/status_endpoint_test.go @@ -7,12 +7,13 @@ import ( "testing" "time" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/tlsutil" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/stretchr/testify/require" ) func rpcClient(t *testing.T, s *Server) rpc.ClientCodec { @@ -24,7 +25,9 @@ func rpcClient(t *testing.T, s *Server) rpc.ClientCodec { // Write the Consul RPC byte to set the mode conn.Write([]byte{byte(pool.RPCConsul)}) - return msgpackrpc.NewCodecFromHandle(true, true, conn, structs.MsgpackHandle) + codec := msgpackrpc.NewCodecFromHandle(true, true, conn, structs.MsgpackHandle) + t.Cleanup(func() { codec.Close() }) + return codec } func insecureRPCClient(s *Server, c tlsutil.Config) (rpc.ClientCodec, error) { diff --git a/agent/consul/system_metadata_test.go b/agent/consul/system_metadata_test.go index c8edb29b7..30f57defd 100644 --- a/agent/consul/system_metadata_test.go +++ b/agent/consul/system_metadata_test.go @@ -32,7 +32,7 @@ func TestLeader_SystemMetadata_CRUD(t *testing.T) { state := srv.fsm.State() - // Initially empty + // Initially has no entries _, entries, err := state.SystemMetadataList(nil) require.NoError(t, err) require.Len(t, entries, 0) diff --git a/agent/consul/txn_endpoint.go b/agent/consul/txn_endpoint.go index 4e6ff80d4..5a17d7bfd 100644 --- a/agent/consul/txn_endpoint.go +++ b/agent/consul/txn_endpoint.go @@ -183,7 +183,6 @@ func (t *Txn) Read(args *structs.TxnReadRequest, reply 
*structs.TxnReadResponse) defer metrics.MeasureSince([]string{"txn", "read"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. - t.srv.setQueryMeta(&reply.QueryMeta) if args.RequireConsistent { if err := t.srv.consistentRead(); err != nil { return err @@ -195,6 +194,14 @@ func (t *Txn) Read(args *structs.TxnReadRequest, reply *structs.TxnReadResponse) if err != nil { return err } + + // There are currently two different ways we handle permission issues. + // + // For simple reads such as KVGet and KVGetTree, the txn succeeds but the + // offending results are omitted. For more involved operations such as + // KVCheckIndex, the txn fails and permission denied errors are returned. + // + // TODO: Maybe we should unify these, or at least cover it in the docs? reply.Errors = t.preCheck(authz, args.Ops) if len(reply.Errors) > 0 { return nil @@ -203,6 +210,13 @@ func (t *Txn) Read(args *structs.TxnReadRequest, reply *structs.TxnReadResponse) // Run the read transaction. state := t.srv.fsm.State() reply.Results, reply.Errors = state.TxnRO(args.Ops) + + total := len(reply.Results) reply.Results = FilterTxnResults(authz, reply.Results) + reply.QueryMeta.ResultsFilteredByACLs = total != len(reply.Results) + + // We have to do this ourselves since we are not doing a blocking RPC. 
+ t.srv.setQueryMeta(&reply.QueryMeta, args.Token) + return nil } diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index d850e0b2b..9619dc881 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -3,7 +3,6 @@ package consul import ( "bytes" "os" - "reflect" "strings" "testing" "time" @@ -320,7 +319,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -839,7 +838,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -874,127 +873,69 @@ func TestTxn_Read_ACLDeny(t *testing.T) { id := createToken(t, codec, testTxnRules) - // Set up a transaction where every operation should get blocked due to - // ACLs. 
- arg := structs.TxnReadRequest{ - Datacenter: "dc1", - Ops: structs.TxnOps{ - &structs.TxnOp{ - KV: &structs.TxnKVOp{ - Verb: api.KVGet, - DirEnt: structs.DirEntry{ - Key: "nope", + t.Run("simple read operations (results get filtered out)", func(t *testing.T) { + arg := structs.TxnReadRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: id}, + Ops: structs.TxnOps{ + { + KV: &structs.TxnKVOp{ + Verb: api.KVGet, + DirEnt: structs.DirEntry{ + Key: "nope", + }, + }, + }, + { + KV: &structs.TxnKVOp{ + Verb: api.KVGetTree, + DirEnt: structs.DirEntry{ + Key: "nope", + }, }, }, }, - &structs.TxnOp{ - KV: &structs.TxnKVOp{ - Verb: api.KVGetTree, - DirEnt: structs.DirEntry{ - Key: "nope", - }, - }, - }, - &structs.TxnOp{ - KV: &structs.TxnKVOp{ - Verb: api.KVCheckSession, - DirEnt: structs.DirEntry{ - Key: "nope", - }, - }, - }, - &structs.TxnOp{ - KV: &structs.TxnKVOp{ - Verb: api.KVCheckIndex, - DirEnt: structs.DirEntry{ - Key: "nope", - }, - }, - }, - &structs.TxnOp{ - Node: &structs.TxnNodeOp{ - Verb: api.NodeGet, - Node: structs.Node{ID: node.ID, Node: node.Node}, - }, - }, - &structs.TxnOp{ - Service: &structs.TxnServiceOp{ - Verb: api.ServiceGet, - Node: "foo", - Service: svc, - }, - }, - &structs.TxnOp{ - Check: &structs.TxnCheckOp{ - Verb: api.CheckGet, - Check: check, - }, - }, - }, - QueryOptions: structs.QueryOptions{ - Token: id, - }, - } - var out structs.TxnReadResponse - if err := msgpackrpc.CallWithCodec(codec, "Txn.Read", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the transaction's return value. - expected := structs.TxnReadResponse{ - QueryMeta: structs.QueryMeta{ - KnownLeader: true, - }, - } - for i, op := range arg.Ops { - switch { - case op.KV != nil: - switch op.KV.Verb { - case api.KVGet, api.KVGetTree: - // These get filtered but won't result in an error. 
- - default: - expected.Errors = append(expected.Errors, &structs.TxnError{ - OpIndex: i, - What: acl.ErrPermissionDenied.Error(), - }) - } - case op.Node != nil: - switch op.Node.Verb { - case api.NodeGet: - // These get filtered but won't result in an error. - - default: - expected.Errors = append(expected.Errors, &structs.TxnError{ - OpIndex: i, - What: acl.ErrPermissionDenied.Error(), - }) - } - case op.Service != nil: - switch op.Service.Verb { - case api.ServiceGet: - // These get filtered but won't result in an error. - - default: - expected.Errors = append(expected.Errors, &structs.TxnError{ - OpIndex: i, - What: acl.ErrPermissionDenied.Error(), - }) - } - case op.Check != nil: - switch op.Check.Verb { - case api.CheckGet: - // These get filtered but won't result in an error. - - default: - expected.Errors = append(expected.Errors, &structs.TxnError{ - OpIndex: i, - What: acl.ErrPermissionDenied.Error(), - }) - } } - } - if !reflect.DeepEqual(out, expected) { - t.Fatalf("bad %v", out) - } + + var out structs.TxnReadResponse + err := msgpackrpc.CallWithCodec(codec, "Txn.Read", &arg, &out) + require.NoError(err) + require.Empty(out.Results) + require.Empty(out.Errors) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("complex operations (return permission denied errors)", func(t *testing.T) { + arg := structs.TxnReadRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: id}, + Ops: structs.TxnOps{ + { + KV: &structs.TxnKVOp{ + Verb: api.KVCheckSession, + DirEnt: structs.DirEntry{ + Key: "nope", + }, + }, + }, + { + KV: &structs.TxnKVOp{ + Verb: api.KVCheckIndex, + DirEnt: structs.DirEntry{ + Key: "nope", + }, + }, + }, + }, + } + + var out structs.TxnReadResponse + err := msgpackrpc.CallWithCodec(codec, "Txn.Read", &arg, &out) + require.NoError(err) + require.Equal(structs.TxnErrors{ + {OpIndex: 0, What: acl.ErrPermissionDenied.Error()}, + {OpIndex: 1, What: 
acl.ErrPermissionDenied.Error()}, + }, out.Errors) + require.Empty(out.Results) + }) } diff --git a/agent/consul/usagemetrics/usagemetrics_oss.go b/agent/consul/usagemetrics/usagemetrics_oss.go index 6376cce88..661fd6d34 100644 --- a/agent/consul/usagemetrics/usagemetrics_oss.go +++ b/agent/consul/usagemetrics/usagemetrics_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package usagemetrics diff --git a/agent/consul/usagemetrics/usagemetrics_oss_test.go b/agent/consul/usagemetrics/usagemetrics_oss_test.go index 94be9f632..5ab34256f 100644 --- a/agent/consul/usagemetrics/usagemetrics_oss_test.go +++ b/agent/consul/usagemetrics/usagemetrics_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package usagemetrics @@ -177,12 +178,12 @@ func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -362,12 +363,12 @@ func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -575,12 +576,12 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -802,12 +803,12 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -1006,12 +1007,12 @@ func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -1200,12 +1201,12 @@ func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: 
"exported-services"}, }, }, }, diff --git a/agent/consul/util.go b/agent/consul/util.go index 09e69381a..3e6b17a28 100644 --- a/agent/consul/util.go +++ b/agent/consul/util.go @@ -158,3 +158,12 @@ func (c *Client) CheckServers(datacenter string, fn func(*metadata.Server) bool) c.router.CheckServers(datacenter, fn) } + +func isSerfMember(s *serf.Serf, nodeName string) bool { + for _, m := range s.Members() { + if m.Name == nodeName { + return true + } + } + return false +} diff --git a/agent/coordinate_endpoint_test.go b/agent/coordinate_endpoint_test.go index 78d5ff609..36b956a8f 100644 --- a/agent/coordinate_endpoint_test.go +++ b/agent/coordinate_endpoint_test.go @@ -8,11 +8,12 @@ import ( "testing" "time" + "github.com/hashicorp/serf/coordinate" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/serf/coordinate" ) func TestCoordinate_Disabled_Response(t *testing.T) { @@ -137,7 +138,6 @@ func TestCoordinate_Nodes(t *testing.T) { arg1 := structs.CoordinateUpdateRequest{ Datacenter: "dc1", Node: "foo", - Segment: "alpha", Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()), } var out struct{} @@ -178,65 +178,6 @@ func TestCoordinate_Nodes(t *testing.T) { r.Fatalf("expected: bar, foo received: %v", coordinates) } }) - // Filter on a nonexistent node segment - req, _ = http.NewRequest("GET", "/v1/coordinate/nodes?segment=nope", nil) - resp = httptest.NewRecorder() - retry.Run(t, func(r *retry.R) { - obj, err := a.srv.CoordinateNodes(resp, req) - if err != nil { - r.Fatalf("err: %v", err) - } - - if resp.Code != http.StatusOK { - r.Fatalf("bad: %v", resp.Code) - } - - coordinates, ok := obj.(structs.Coordinates) - if !ok { - r.Fatalf("expected: structs.Coordinates, received: %+v", obj) - } - if len(coordinates) != 0 { - r.Fatalf("coordinates should be empty, received: %v", coordinates) - } - }) - // Filter on a real node 
segment - req, _ = http.NewRequest("GET", "/v1/coordinate/nodes?segment=alpha", nil) - resp = httptest.NewRecorder() - retry.Run(t, func(r *retry.R) { - obj, err := a.srv.CoordinateNodes(resp, req) - if err != nil { - r.Fatalf("err: %v", err) - } - - if resp.Code != http.StatusOK { - r.Fatalf("bad: %v", resp.Code) - } - - coordinates, ok := obj.(structs.Coordinates) - if !ok { - r.Fatalf("expected: structs.Coordinates, received: %+v", obj) - } - if len(coordinates) != 1 || coordinates[0].Node != "foo" { - r.Fatalf("expected: foo received: %v", coordinates) - } - }) - // Make sure the empty filter works - req, _ = http.NewRequest("GET", "/v1/coordinate/nodes?segment=", nil) - resp = httptest.NewRecorder() - retry.Run(t, func(r *retry.R) { - obj, err := a.srv.CoordinateNodes(resp, req) - if err != nil { - r.Fatalf("err: %v", err) - } - - coordinates, ok := obj.(structs.Coordinates) - if !ok { - r.Fatalf("expected: structs.Coordinates, received: %+v", obj) - } - if len(coordinates) != 1 || coordinates[0].Node != "bar" { - r.Fatalf("expected: bar received: %v", coordinates) - } - }) } func TestCoordinate_Node(t *testing.T) { @@ -280,7 +221,6 @@ func TestCoordinate_Node(t *testing.T) { arg1 := structs.CoordinateUpdateRequest{ Datacenter: "dc1", Node: "foo", - Segment: "alpha", Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()), } var out struct{} @@ -315,45 +255,6 @@ func TestCoordinate_Node(t *testing.T) { coordinates[0].Node != "foo" { t.Fatalf("bad: %v", coordinates) } - - // Filter on a nonexistent node segment - req, _ = http.NewRequest("GET", "/v1/coordinate/node/foo?segment=nope", nil) - resp = httptest.NewRecorder() - _, err = a.srv.CoordinateNode(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if resp.Code != http.StatusNotFound { - t.Fatalf("bad: %v", resp.Code) - } - - // Filter on a real node segment - req, _ = http.NewRequest("GET", "/v1/coordinate/node/foo?segment=alpha", nil) - resp = httptest.NewRecorder() - obj, err = 
a.srv.CoordinateNode(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.Code != http.StatusOK { - t.Fatalf("bad: %v", resp.Code) - } - - coordinates = obj.(structs.Coordinates) - if len(coordinates) != 1 || coordinates[0].Node != "foo" { - t.Fatalf("bad: %v", coordinates) - } - - // Make sure the empty filter works - req, _ = http.NewRequest("GET", "/v1/coordinate/node/foo?segment=", nil) - resp = httptest.NewRecorder() - _, err = a.srv.CoordinateNode(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if resp.Code != http.StatusNotFound { - t.Fatalf("bad: %v", resp.Code) - } } func TestCoordinate_Update(t *testing.T) { diff --git a/agent/dns.go b/agent/dns.go index 47f2edae5..1deda3ebd 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -695,7 +695,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi done := false for i := len(labels) - 1; i >= 0 && !done; i-- { switch labels[i] { - case "service", "connect", "ingress", "node", "query", "addr": + case "service", "connect", "virtual", "ingress", "node", "query", "addr": queryParts = labels[:i] querySuffixes = labels[i+1:] queryKind = labels[i] @@ -785,6 +785,41 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi // name.connect.consul return d.serviceLookup(cfg, lookup, req, resp) + case "virtual": + if len(queryParts) < 1 { + return invalid() + } + + if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) { + return invalid() + } + + args := structs.ServiceSpecificRequest{ + Datacenter: datacenter, + ServiceName: queryParts[len(queryParts)-1], + EnterpriseMeta: entMeta, + QueryOptions: structs.QueryOptions{ + Token: d.agent.tokens.UserToken(), + }, + } + var out string + if err := d.agent.RPC("Catalog.VirtualIPForService", &args, &out); err != nil { + return err + } + if out != "" { + resp.Answer = append(resp.Answer, &dns.A{ + Hdr: dns.RR_Header{ + Name: qName + respDomain, + Rrtype: dns.TypeA, + 
Class: dns.ClassINET, + Ttl: uint32(cfg.NodeTTL / time.Second), + }, + A: net.ParseIP(out), + }) + } + + return nil + case "ingress": if len(queryParts) < 1 { return invalid() diff --git a/agent/dns_oss.go b/agent/dns_oss.go index 47b68bae0..1328195c8 100644 --- a/agent/dns_oss.go +++ b/agent/dns_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/dns_test.go b/agent/dns_test.go index 80b7a93e9..6d4085833 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -1756,6 +1756,24 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { require.Equal(t, uint32(0), srvRec.Hdr.Ttl) require.Equal(t, "127.0.0.55", cnameRec.A.String()) } + + // Look up the virtual IP of the proxy. + questions = []string{ + "db.virtual.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.Nil(t, err) + require.Len(t, in.Answer, 1) + + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, "240.0.0.1", aRec.A.String()) + } } func TestDNS_IngressServiceLookup(t *testing.T) { @@ -6206,11 +6224,18 @@ func TestDNS_ServiceLookup_FilterACL(t *testing.T) { for _, tt := range tests { t.Run("ACLToken == "+tt.token, func(t *testing.T) { a := NewTestAgent(t, ` - acl_token = "`+tt.token+`" - acl_master_token = "root" - acl_datacenter = "dc1" - acl_down_policy = "deny" - acl_default_policy = "deny" + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + down_policy = "deny" + + tokens { + initial_management = "root" + default = "`+tt.token+`" + } + } `) defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") diff --git a/agent/enterprise_delegate_oss.go b/agent/enterprise_delegate_oss.go index c7e1921aa..876c8837a 100644 --- a/agent/enterprise_delegate_oss.go +++ b/agent/enterprise_delegate_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff 
--git a/agent/event_endpoint.go b/agent/event_endpoint.go index 02bf81285..eb6561a1b 100644 --- a/agent/event_endpoint.go +++ b/agent/event_endpoint.go @@ -127,17 +127,6 @@ RUN_QUERY: // Get the recent events events := s.agent.UserEvents() - // Filter the events using the ACL, if present - for i := 0; i < len(events); i++ { - name := events[i].Name - if authz.EventRead(name, nil) == acl.Allow { - continue - } - s.agent.logger.Debug("dropping event from result due to ACLs", "event", name) - events = append(events[:i], events[i+1:]...) - i-- - } - // Filter the events if requested if nameFilter != "" { for i := 0; i < len(events); i++ { @@ -148,6 +137,36 @@ RUN_QUERY: } } + // Filter the events using the ACL, if present + // + // Note: we filter the results with ACLs *after* applying the user-supplied + // name filter, to ensure the filtered-by-acls header we set below does not + // include results that would be filtered out even if the user did have + // permission. + var removed bool + for i := 0; i < len(events); i++ { + name := events[i].Name + if authz.EventRead(name, nil) == acl.Allow { + continue + } + s.agent.logger.Debug("dropping event from result due to ACLs", "event", name) + removed = true + events = append(events[:i], events[i+1:]...) + i-- + } + + // Set the X-Consul-Results-Filtered-By-ACLs header, but only if the user is + // authenticated (to prevent information leaking). + // + // This is done automatically for HTTP endpoints that proxy to an RPC endpoint + // that sets QueryMeta.ResultsFilteredByACLs, but must be done manually for + // agent-local endpoints. + // + // For more information see the comment on: Server.maskResultsFilteredByACLs. 
+ if token != "" { + setResultsFilteredByACLs(resp, removed) + } + // Determine the index var index uint64 if len(events) == 0 { diff --git a/agent/event_endpoint_test.go b/agent/event_endpoint_test.go index 476bf0cc1..d3ca95077 100644 --- a/agent/event_endpoint_test.go +++ b/agent/event_endpoint_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" + "github.com/stretchr/testify/require" ) func TestEventFire(t *testing.T) { @@ -199,47 +200,78 @@ func TestEventList_ACLFilter(t *testing.T) { defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") - // Fire an event. - p := &UserEvent{Name: "foo"} - if err := a.UserEvent("dc1", "root", p); err != nil { - t.Fatalf("err: %v", err) + // Fire some events. + events := []*UserEvent{ + {Name: "foo"}, + {Name: "bar"}, + } + for _, e := range events { + err := a.UserEvent("dc1", "root", e) + require.NoError(t, err) } t.Run("no token", func(t *testing.T) { retry.Run(t, func(r *retry.R) { - req, _ := http.NewRequest("GET", "/v1/event/list", nil) + require := require.New(r) + + req := httptest.NewRequest("GET", "/v1/event/list", nil) resp := httptest.NewRecorder() + obj, err := a.srv.EventList(resp, req) - if err != nil { - r.Fatal(err) - } + require.NoError(err) list, ok := obj.([]*UserEvent) - if !ok { - r.Fatalf("bad: %#v", obj) - } - if len(list) != 0 { - r.Fatalf("bad: %#v", list) - } + require.True(ok) + require.Empty(list) + require.Empty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + }) + + t.Run("token with access to one event type", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + require := require.New(r) + + token := testCreateToken(t, a, ` + event "foo" { + policy = "read" + } + `) + + req := httptest.NewRequest("GET", fmt.Sprintf("/v1/event/list?token=%s", token), nil) + resp := httptest.NewRecorder() + + obj, err := a.srv.EventList(resp, req) + require.NoError(err) + + list, ok := 
obj.([]*UserEvent) + require.True(ok) + require.Len(list, 1) + require.Equal("foo", list[0].Name) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) }) t.Run("root token", func(t *testing.T) { retry.Run(t, func(r *retry.R) { - req, _ := http.NewRequest("GET", "/v1/event/list?token=root", nil) + require := require.New(r) + + req := httptest.NewRequest("GET", "/v1/event/list?token=root", nil) resp := httptest.NewRecorder() + obj, err := a.srv.EventList(resp, req) - if err != nil { - r.Fatal(err) - } + require.NoError(err) list, ok := obj.([]*UserEvent) - if !ok { - r.Fatalf("bad: %#v", obj) - } - if len(list) != 1 || list[0].Name != "foo" { - r.Fatalf("bad: %#v", list) + require.True(ok) + require.Len(list, 2) + + var names []string + for _, e := range list { + names = append(names, e.Name) } + require.ElementsMatch([]string{"foo", "bar"}, names) + + require.Empty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) }) } diff --git a/agent/exec/exec_unix.go b/agent/exec/exec_unix.go index 3dddd1941..0bbdad8c5 100644 --- a/agent/exec/exec_unix.go +++ b/agent/exec/exec_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package exec diff --git a/agent/exec/exec_windows.go b/agent/exec/exec_windows.go index 851b1d4cf..bb96e72c3 100644 --- a/agent/exec/exec_windows.go +++ b/agent/exec/exec_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package exec diff --git a/agent/grpc/client_test.go b/agent/grpc/client_test.go index 665e38f8c..cd4c827dd 100644 --- a/agent/grpc/client_test.go +++ b/agent/grpc/client_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net" - "strconv" "strings" "sync/atomic" "testing" @@ -21,6 +20,7 @@ import ( "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/consul/types" ) // useTLSForDcAlwaysTrue tell GRPC to always return the TLS is enabled @@ -29,15 +29,12 @@ func useTLSForDcAlwaysTrue(_ string) 
bool { } func TestNewDialer_WithTLSWrapper(t *testing.T) { - ports := freeport.MustTake(1) - defer freeport.Return(ports) - - lis, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(ports[0]))) + lis, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) t.Cleanup(logError(t, lis.Close)) builder := resolver.NewServerResolverBuilder(newConfig(t)) - builder.AddServer(&metadata.Server{ + builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-1", ID: "ID1", Datacenter: "dc1", @@ -68,26 +65,18 @@ func TestNewDialer_WithTLSWrapper(t *testing.T) { } func TestNewDialer_WithALPNWrapper(t *testing.T) { - ports := freeport.MustTake(3) - defer freeport.Return(ports) - - var ( - s1addr = ipaddr.FormatAddressPort("127.0.0.1", ports[0]) - s2addr = ipaddr.FormatAddressPort("127.0.0.1", ports[1]) - gwAddr = ipaddr.FormatAddressPort("127.0.0.1", ports[2]) - ) - - lis1, err := net.Listen("tcp", s1addr) + lis1, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) t.Cleanup(logError(t, lis1.Close)) - lis2, err := net.Listen("tcp", s2addr) + lis2, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) t.Cleanup(logError(t, lis2.Close)) // Send all of the traffic to dc2's server var p tcpproxy.Proxy - p.AddRoute(gwAddr, tcpproxy.To(s2addr)) + gwAddr := ipaddr.FormatAddressPort("127.0.0.1", freeport.GetOne(t)) + p.AddRoute(gwAddr, tcpproxy.To(lis2.Addr().String())) p.AddStopACMESearch(gwAddr) require.NoError(t, p.Start()) defer func() { @@ -96,14 +85,14 @@ func TestNewDialer_WithALPNWrapper(t *testing.T) { }() builder := resolver.NewServerResolverBuilder(newConfig(t)) - builder.AddServer(&metadata.Server{ + builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-1", ID: "ID1", Datacenter: "dc1", Addr: lis1.Addr(), UseTLS: true, }) - builder.AddServer(&metadata.Server{ + builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-2", ID: "ID2", Datacenter: "dc2", @@ -165,7 +154,7 @@ func 
TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) { srv := newSimpleTestServer(t, "server-1", "dc1", tlsConf) md := srv.Metadata() - res.AddServer(md) + res.AddServer(types.AreaWAN, md) t.Cleanup(srv.shutdown) pool := NewClientConnPool(ClientConnPoolConfig{ @@ -193,10 +182,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) { func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T) { // if this test is failing because of expired certificates // use the procedure in test/CA-GENERATION.md - ports := freeport.MustTake(1) - defer freeport.Return(ports) - - gwAddr := ipaddr.FormatAddressPort("127.0.0.1", ports[0]) + gwAddr := ipaddr.FormatAddressPort("127.0.0.1", freeport.GetOne(t)) res := resolver.NewServerResolverBuilder(newConfig(t)) registerWithGRPC(t, res) @@ -226,7 +212,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T) }() md := srv.Metadata() - res.AddServer(md) + res.AddServer(types.AreaWAN, md) t.Cleanup(srv.shutdown) clientTLSConf, err := tlsutil.NewConfigurator(tlsutil.Config{ @@ -281,7 +267,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := newSimpleTestServer(t, name, "dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -295,7 +281,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { first, err := client.Something(ctx, &testservice.Req{}) require.NoError(t, err) - res.RemoveServer(&metadata.Server{ID: first.ServerName, Datacenter: "dc1"}) + res.RemoveServer(types.AreaWAN, &metadata.Server{ID: first.ServerName, Datacenter: "dc1"}) resp, err := client.Something(ctx, &testservice.Req{}) require.NoError(t, err) @@ -317,7 +303,7 @@ func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := 
newSimpleTestServer(t, name, "dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) servers = append(servers, srv) t.Cleanup(srv.shutdown) } @@ -367,7 +353,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := newSimpleTestServer(t, name, "dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -421,7 +407,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_MultiDC(t *testing.T) { for _, dc := range dcs { name := "server-0-" + dc srv := newSimpleTestServer(t, name, dc, nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) } diff --git a/agent/grpc/handler_test.go b/agent/grpc/handler_test.go index 908bed0b1..46faa9696 100644 --- a/agent/grpc/handler_test.go +++ b/agent/grpc/handler_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/hashicorp/consul/types" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -28,7 +30,7 @@ func TestHandler_PanicRecoveryInterceptor(t *testing.T) { registerWithGRPC(t, res) srv := newPanicTestServer(t, logger, "server-1", "dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) pool := NewClientConnPool(ClientConnPoolConfig{ diff --git a/agent/grpc/resolver/resolver.go b/agent/grpc/resolver/resolver.go index f6c3d7fe9..e77ee568d 100644 --- a/agent/grpc/resolver/resolver.go +++ b/agent/grpc/resolver/resolver.go @@ -10,6 +10,7 @@ import ( "google.golang.org/grpc/resolver" "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" ) // ServerResolverBuilder tracks the current server list and keeps any @@ -18,9 +19,9 @@ type ServerResolverBuilder struct { cfg Config // leaderResolver is used to track the address of the leader in the local 
DC. leaderResolver leaderResolver - // servers is an index of Servers by Server.ID. The map contains server IDs + // servers is an index of Servers by area and Server.ID. The map contains server IDs // for all datacenters. - servers map[string]*metadata.Server + servers map[types.AreaID]map[string]*metadata.Server // resolvers is an index of connections to the serverResolver which manages // addresses of servers for that connection. resolvers map[resolver.ClientConn]*serverResolver @@ -37,7 +38,7 @@ type Config struct { func NewServerResolverBuilder(cfg Config) *ServerResolverBuilder { return &ServerResolverBuilder{ cfg: cfg, - servers: make(map[string]*metadata.Server), + servers: make(map[types.AreaID]map[string]*metadata.Server), resolvers: make(map[resolver.ClientConn]*serverResolver), } } @@ -72,9 +73,11 @@ func (s *ServerResolverBuilder) ServerForGlobalAddr(globalAddr string) (*metadat s.lock.RLock() defer s.lock.RUnlock() - for _, server := range s.servers { - if DCPrefix(server.Datacenter, server.Addr.String()) == globalAddr { - return server, nil + for _, areaServers := range s.servers { + for _, server := range areaServers { + if DCPrefix(server.Datacenter, server.Addr.String()) == globalAddr { + return server, nil + } } } return nil, fmt.Errorf("failed to find Consul server for global address %q", globalAddr) @@ -138,11 +141,17 @@ func (s *ServerResolverBuilder) Authority() string { } // AddServer updates the resolvers' states to include the new server's address. 
-func (s *ServerResolverBuilder) AddServer(server *metadata.Server) { +func (s *ServerResolverBuilder) AddServer(areaID types.AreaID, server *metadata.Server) { s.lock.Lock() defer s.lock.Unlock() - s.servers[uniqueID(server)] = server + areaServers, ok := s.servers[areaID] + if !ok { + areaServers = make(map[string]*metadata.Server) + s.servers[areaID] = areaServers + } + + areaServers[uniqueID(server)] = server addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { @@ -168,11 +177,19 @@ func DCPrefix(datacenter, suffix string) string { } // RemoveServer updates the resolvers' states with the given server removed. -func (s *ServerResolverBuilder) RemoveServer(server *metadata.Server) { +func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metadata.Server) { s.lock.Lock() defer s.lock.Unlock() - delete(s.servers, uniqueID(server)) + areaServers, ok := s.servers[areaID] + if !ok { + return // already gone + } + + delete(areaServers, uniqueID(server)) + if len(areaServers) == 0 { + delete(s.servers, areaID) + } addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { @@ -185,18 +202,29 @@ func (s *ServerResolverBuilder) RemoveServer(server *metadata.Server) { // getDCAddrs returns a list of the server addresses for the given datacenter. // This method requires that lock is held for reads. 
func (s *ServerResolverBuilder) getDCAddrs(dc string) []resolver.Address { - var addrs []resolver.Address - for _, server := range s.servers { - if server.Datacenter != dc { - continue - } + var ( + addrs []resolver.Address + keptServerIDs = make(map[string]struct{}) + ) + for _, areaServers := range s.servers { + for _, server := range areaServers { + if server.Datacenter != dc { + continue + } - addrs = append(addrs, resolver.Address{ - // NOTE: the address persisted here is only dialable using our custom dialer - Addr: DCPrefix(server.Datacenter, server.Addr.String()), - Type: resolver.Backend, - ServerName: server.Name, - }) + // Servers may be part of multiple areas, so only include each one once. + if _, ok := keptServerIDs[server.ID]; ok { + continue + } + keptServerIDs[server.ID] = struct{}{} + + addrs = append(addrs, resolver.Address{ + // NOTE: the address persisted here is only dialable using our custom dialer + Addr: DCPrefix(server.Datacenter, server.Addr.String()), + Type: resolver.Backend, + ServerName: server.Name, + }) + } } return addrs } diff --git a/agent/grpc/server_test.go b/agent/grpc/server_test.go index a6684b0ff..eb56b8933 100644 --- a/agent/grpc/server_test.go +++ b/agent/grpc/server_test.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "net" - "strconv" "sync/atomic" "testing" "time" @@ -18,7 +17,6 @@ import ( "github.com/hashicorp/consul/agent/grpc/internal/testservice" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" - "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/go-hclog" ) @@ -59,12 +57,7 @@ func newTestServer(t *testing.T, logger hclog.Logger, name, dc string, tlsConf * addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} handler := NewHandler(logger, addr, register) - ports := freeport.MustTake(1) - t.Cleanup(func() { - freeport.Return(ports) - }) - - lis, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(ports[0]))) + lis, err := 
net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) rpc := &fakeRPCListener{t: t, handler: handler, tlsConf: tlsConf} diff --git a/agent/http.go b/agent/http.go index 02e95e6ba..a1d8461d0 100644 --- a/agent/http.go +++ b/agent/http.go @@ -734,6 +734,7 @@ func setMeta(resp http.ResponseWriter, m structs.QueryMetaCompat) { setKnownLeader(resp, m.GetKnownLeader()) setConsistency(resp, m.GetConsistencyLevel()) setQueryBackend(resp, m.GetBackend()) + setResultsFilteredByACLs(resp, m.GetResultsFilteredByACLs()) } func setQueryBackend(resp http.ResponseWriter, backend structs.QueryBackend) { @@ -757,6 +758,16 @@ func setCacheMeta(resp http.ResponseWriter, m *cache.ResultMeta) { } } +// setResultsFilteredByACLs sets an HTTP response header to indicate that the +// query results were filtered by enforcing ACLs. If the given filtered value +// is false the header will be omitted, as it's ambiguous whether the results +// were not filtered or whether the endpoint doesn't yet support this header. +func setResultsFilteredByACLs(resp http.ResponseWriter, filtered bool) { + if filtered { + resp.Header().Set("X-Consul-Results-Filtered-By-ACLs", "true") + } +} + // setHeaders is used to set canonical response header fields func setHeaders(resp http.ResponseWriter, headers map[string]string) { for field, value := range headers { diff --git a/agent/http_oss.go b/agent/http_oss.go index 79840a6d1..797070ea1 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/http_oss_test.go b/agent/http_oss_test.go index db7d89e15..4bb392f61 100644 --- a/agent/http_oss_test.go +++ b/agent/http_oss_test.go @@ -72,13 +72,13 @@ func TestHTTPAPI_MethodNotAllowed_OSS(t *testing.T) { enabled = true default_policy = "deny" tokens { - master = "sekrit" - agent = "sekrit" + initial_management = "sekrit" + agent = "sekrit" } } `) defer a.Shutdown() - // Use the master token here so the wait actually works.
+ // Use the initial management token here so the wait actually works. testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("sekrit")) all := []string{"GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS"} diff --git a/agent/http_register.go b/agent/http_register.go index 2e3d98ade..df20cdfe3 100644 --- a/agent/http_register.go +++ b/agent/http_register.go @@ -66,11 +66,11 @@ func init() { registerEndpoint("/v1/config", []string{"PUT"}, (*HTTPHandlers).ConfigApply) registerEndpoint("/v1/connect/ca/configuration", []string{"GET", "PUT"}, (*HTTPHandlers).ConnectCAConfiguration) registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPHandlers).ConnectCARoots) - registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPHandlers).IntentionEndpoint) + registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPHandlers).IntentionEndpoint) // POST is deprecated registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPHandlers).IntentionMatch) registerEndpoint("/v1/connect/intentions/check", []string{"GET"}, (*HTTPHandlers).IntentionCheck) registerEndpoint("/v1/connect/intentions/exact", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).IntentionExact) - registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).IntentionSpecific) + registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).IntentionSpecific) // deprecated registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPHandlers).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPHandlers).CoordinateNodes) registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPHandlers).CoordinateNode) diff --git a/agent/http_test.go b/agent/http_test.go index d391af3c1..33e2e7867 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -264,6 +264,22 @@ func TestSetKnownLeader(t *testing.T) { } } +func TestSetFilteredByACLs(t *testing.T) { + 
t.Parallel() + resp := httptest.NewRecorder() + setResultsFilteredByACLs(resp, true) + header := resp.Header().Get("X-Consul-Results-Filtered-By-ACLs") + if header != "true" { + t.Fatalf("Bad: %v", header) + } + resp = httptest.NewRecorder() + setResultsFilteredByACLs(resp, false) + header = resp.Header().Get("X-Consul-Results-Filtered-By-ACLs") + if header != "" { + t.Fatalf("Bad: %v", header) + } +} + func TestSetLastContact(t *testing.T) { t.Parallel() tests := []struct { @@ -291,23 +307,24 @@ func TestSetLastContact(t *testing.T) { func TestSetMeta(t *testing.T) { t.Parallel() meta := structs.QueryMeta{ - Index: 1000, - KnownLeader: true, - LastContact: 123456 * time.Microsecond, + Index: 1000, + KnownLeader: true, + LastContact: 123456 * time.Microsecond, + ResultsFilteredByACLs: true, } resp := httptest.NewRecorder() setMeta(resp, &meta) - header := resp.Header().Get("X-Consul-Index") - if header != "1000" { - t.Fatalf("Bad: %v", header) + + testCases := map[string]string{ + "X-Consul-Index": "1000", + "X-Consul-KnownLeader": "true", + "X-Consul-LastContact": "123", + "X-Consul-Results-Filtered-By-ACLs": "true", } - header = resp.Header().Get("X-Consul-KnownLeader") - if header != "true" { - t.Fatalf("Bad: %v", header) - } - header = resp.Header().Get("X-Consul-LastContact") - if header != "123" { - t.Fatalf("Bad: %v", header) + for header, expectedValue := range testCases { + if v := resp.Header().Get(header); v != expectedValue { + t.Fatalf("expected %q for header %s got %q", expectedValue, header, v) + } } } @@ -977,13 +994,21 @@ func TestHTTPServer_PProfHandlers_ACLs(t *testing.T) { dc1 := "dc1" a := NewTestAgent(t, ` - acl_datacenter = "`+dc1+`" - acl_default_policy = "deny" - acl_master_token = "master" - acl_agent_token = "agent" - acl_agent_master_token = "towel" - enable_debug = false -`) + primary_datacenter = "`+dc1+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "agent" + agent_recovery 
= "towel" + } + } + + enable_debug = false + `) cases := []struct { code int @@ -993,7 +1018,7 @@ func TestHTTPServer_PProfHandlers_ACLs(t *testing.T) { }{ { code: http.StatusOK, - token: "master", + token: "root", endpoint: "/debug/pprof/heap", nilResponse: false, }, @@ -1017,7 +1042,7 @@ func TestHTTPServer_PProfHandlers_ACLs(t *testing.T) { }, { code: http.StatusOK, - token: "master", + token: "root", endpoint: "/debug/pprof/heap", nilResponse: false, }, diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 8af3520ad..8a8456721 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -46,7 +46,8 @@ func (s *HTTPHandlers) IntentionList(resp http.ResponseWriter, req *http.Request return reply.Intentions, nil } -// POST /v1/connect/intentions +// IntentionCreate is used to create legacy intentions. +// Deprecated: use IntentionPutExact. func (s *HTTPHandlers) IntentionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Method is tested in IntentionEndpoint @@ -258,6 +259,20 @@ func (s *HTTPHandlers) IntentionCheck(resp http.ResponseWriter, req *http.Reques return &reply, nil } +// IntentionExact handles the endpoint for /v1/connect/intentions/exact +func (s *HTTPHandlers) IntentionExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + switch req.Method { + case "GET": + return s.IntentionGetExact(resp, req) + case "PUT": + return s.IntentionPutExact(resp, req) + case "DELETE": + return s.IntentionDeleteExact(resp, req) + default: + return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}} + } +} + // GET /v1/connect/intentions/exact func (s *HTTPHandlers) IntentionGetExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { var entMeta structs.EnterpriseMeta @@ -334,123 +349,6 @@ func (s *HTTPHandlers) IntentionGetExact(resp http.ResponseWriter, req *http.Req return reply.Intentions[0], nil } -// IntentionExact handles the endpoint 
for /v1/connect/intentions/exact -func (s *HTTPHandlers) IntentionExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - switch req.Method { - case "GET": - return s.IntentionGetExact(resp, req) - case "PUT": - return s.IntentionPutExact(resp, req) - case "DELETE": - return s.IntentionDeleteExact(resp, req) - default: - return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}} - } -} - -// IntentionSpecific handles the endpoint for /v1/connect/intentions/:id -func (s *HTTPHandlers) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - id := strings.TrimPrefix(req.URL.Path, "/v1/connect/intentions/") - - switch req.Method { - case "GET": - return s.IntentionSpecificGet(id, resp, req) - - case "PUT": - return s.IntentionSpecificUpdate(id, resp, req) - - case "DELETE": - return s.IntentionSpecificDelete(id, resp, req) - - default: - return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}} - } -} - -// GET /v1/connect/intentions/:id -func (s *HTTPHandlers) IntentionSpecificGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Method is tested in IntentionEndpoint - - args := structs.IntentionQueryRequest{ - IntentionID: id, - } - if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { - return nil, nil - } - - var reply structs.IndexedIntentions - if err := s.agent.RPC("Intention.Get", &args, &reply); err != nil { - // We have to check the string since the RPC sheds the error type - if err.Error() == consul.ErrIntentionNotFound.Error() { - resp.WriteHeader(http.StatusNotFound) - fmt.Fprint(resp, err.Error()) - return nil, nil - } - - // Not ideal, but there are a number of error scenarios that are not - // user error (400). We look for a specific case of invalid UUID - // to detect a parameter error and return a 400 response. 
The error - // is not a constant type or message, so we have to use strings.Contains - if strings.Contains(err.Error(), "UUID") { - return nil, BadRequestError{Reason: err.Error()} - } - - return nil, err - } - - // This shouldn't happen since the called API documents it shouldn't, - // but we check since the alternative if it happens is a panic. - if len(reply.Intentions) == 0 { - resp.WriteHeader(http.StatusNotFound) - return nil, nil - } - - return reply.Intentions[0], nil -} - -// PUT /v1/connect/intentions/:id -func (s *HTTPHandlers) IntentionSpecificUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Method is tested in IntentionEndpoint - - var entMeta structs.EnterpriseMeta - if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { - return nil, err - } - if entMeta.PartitionOrDefault() != structs.PartitionOrDefault("") { - return nil, BadRequestError{Reason: "Cannot use a partition with this endpoint"} - } - - args := structs.IntentionRequest{ - Op: structs.IntentionOpUpdate, - } - s.parseDC(req, &args.Datacenter) - s.parseToken(req, &args.Token) - if err := decodeBody(req.Body, &args.Intention); err != nil { - return nil, BadRequestError{Reason: fmt.Sprintf("Request decode failed: %v", err)} - } - - if args.Intention.DestinationPartition != "" && args.Intention.DestinationPartition != "default" { - return nil, BadRequestError{Reason: "Cannot specify a destination partition with this endpoint"} - } - if args.Intention.SourcePartition != "" && args.Intention.SourcePartition != "default" { - return nil, BadRequestError{Reason: "Cannot specify a source partition with this endpoint"} - } - - args.Intention.FillPartitionAndNamespace(&entMeta, false) - - // Use the ID from the URL - args.Intention.ID = id - - var reply string - if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { - return nil, err - } - - // Update uses the same create response - return intentionCreateResponse{reply}, nil - -} - // 
PUT /v1/connect/intentions/exact func (s *HTTPHandlers) IntentionPutExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { var entMeta structs.EnterpriseMeta @@ -493,25 +391,6 @@ func (s *HTTPHandlers) IntentionPutExact(resp http.ResponseWriter, req *http.Req return true, nil } -// DELETE /v1/connect/intentions/:id -func (s *HTTPHandlers) IntentionSpecificDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Method is tested in IntentionEndpoint - - args := structs.IntentionRequest{ - Op: structs.IntentionOpDelete, - Intention: &structs.Intention{ID: id}, - } - s.parseDC(req, &args.Datacenter) - s.parseToken(req, &args.Token) - - var reply string - if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { - return nil, err - } - - return true, nil -} - // DELETE /v1/connect/intentions/exact func (s *HTTPHandlers) IntentionDeleteExact(resp http.ResponseWriter, req *http.Request) (interface{}, error) { var entMeta structs.EnterpriseMeta @@ -603,3 +482,125 @@ func parseIntentionStringComponent(input string, entMeta *structs.EnterpriseMeta return "", "", "", fmt.Errorf("input can contain at most two '/'") } } + +// IntentionSpecific handles the endpoint for /v1/connect/intentions/:id. +// Deprecated: use IntentionExact. +func (s *HTTPHandlers) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + id := strings.TrimPrefix(req.URL.Path, "/v1/connect/intentions/") + + switch req.Method { + case "GET": + return s.IntentionSpecificGet(id, resp, req) + + case "PUT": + return s.IntentionSpecificUpdate(id, resp, req) + + case "DELETE": + return s.IntentionSpecificDelete(id, resp, req) + + default: + return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}} + } +} + +// Deprecated: use IntentionGetExact. 
+func (s *HTTPHandlers) IntentionSpecificGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + args := structs.IntentionQueryRequest{ + IntentionID: id, + } + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + var reply structs.IndexedIntentions + if err := s.agent.RPC("Intention.Get", &args, &reply); err != nil { + // We have to check the string since the RPC sheds the error type + if err.Error() == consul.ErrIntentionNotFound.Error() { + resp.WriteHeader(http.StatusNotFound) + fmt.Fprint(resp, err.Error()) + return nil, nil + } + + // Not ideal, but there are a number of error scenarios that are not + // user error (400). We look for a specific case of invalid UUID + // to detect a parameter error and return a 400 response. The error + // is not a constant type or message, so we have to use strings.Contains + if strings.Contains(err.Error(), "UUID") { + return nil, BadRequestError{Reason: err.Error()} + } + + return nil, err + } + + // This shouldn't happen since the called API documents it shouldn't, + // but we check since the alternative if it happens is a panic. + if len(reply.Intentions) == 0 { + resp.WriteHeader(http.StatusNotFound) + return nil, nil + } + + return reply.Intentions[0], nil +} + +// Deprecated: use IntentionPutExact. 
+func (s *HTTPHandlers) IntentionSpecificUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + var entMeta structs.EnterpriseMeta + if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { + return nil, err + } + if entMeta.PartitionOrDefault() != structs.PartitionOrDefault("") { + return nil, BadRequestError{Reason: "Cannot use a partition with this endpoint"} + } + + args := structs.IntentionRequest{ + Op: structs.IntentionOpUpdate, + } + s.parseDC(req, &args.Datacenter) + s.parseToken(req, &args.Token) + if err := decodeBody(req.Body, &args.Intention); err != nil { + return nil, BadRequestError{Reason: fmt.Sprintf("Request decode failed: %v", err)} + } + + if args.Intention.DestinationPartition != "" && args.Intention.DestinationPartition != "default" { + return nil, BadRequestError{Reason: "Cannot specify a destination partition with this endpoint"} + } + if args.Intention.SourcePartition != "" && args.Intention.SourcePartition != "default" { + return nil, BadRequestError{Reason: "Cannot specify a source partition with this endpoint"} + } + + args.Intention.FillPartitionAndNamespace(&entMeta, false) + + // Use the ID from the URL + args.Intention.ID = id + + var reply string + if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { + return nil, err + } + + // Update uses the same create response + return intentionCreateResponse{reply}, nil +} + +// Deprecated: use IntentionDeleteExact. 
+func (s *HTTPHandlers) IntentionSpecificDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + args := structs.IntentionRequest{ + Op: structs.IntentionOpDelete, + Intention: &structs.Intention{ID: id}, + } + s.parseDC(req, &args.Datacenter) + s.parseToken(req, &args.Token) + + var reply string + if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { + return nil, err + } + + return true, nil +} diff --git a/agent/intentions_endpoint_oss_test.go b/agent/intentions_endpoint_oss_test.go index f5bd381aa..eb04d978b 100644 --- a/agent/intentions_endpoint_oss_test.go +++ b/agent/intentions_endpoint_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/keyring_test.go b/agent/keyring_test.go index a777973cf..3362a2c70 100644 --- a/agent/keyring_test.go +++ b/agent/keyring_test.go @@ -302,10 +302,17 @@ func TestAgentKeyring_ACL(t *testing.T) { dataDir := testutil.TempDir(t, "keyfile") writeKeyRings(t, key1, dataDir) - a := StartTestAgent(t, TestAgent{HCL: TestACLConfig() + ` - acl_datacenter = "dc1" - acl_master_token = "root" - acl_default_policy = "deny" + a := StartTestAgent(t, TestAgent{HCL: ` + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } `, DataDir: dataDir}) defer a.Shutdown() diff --git a/agent/local/state.go b/agent/local/state.go index b279f3b1d..a729bf06c 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -1082,6 +1082,19 @@ func (l *State) updateSyncState() error { ls.Service.Tags = make([]string, len(rs.Tags)) copy(ls.Service.Tags, rs.Tags) } + + // Merge any tagged addresses with the consul- prefix (set by the server) + // back into the local state. 
+ if !reflect.DeepEqual(ls.Service.TaggedAddresses, rs.TaggedAddresses) { + if ls.Service.TaggedAddresses == nil { + ls.Service.TaggedAddresses = make(map[string]structs.ServiceAddress) + } + for k, v := range rs.TaggedAddresses { + if strings.HasPrefix(k, structs.MetaKeyReservedPrefix) { + ls.Service.TaggedAddresses[k] = v + } + } + } ls.InSync = ls.Service.IsSame(rs) } @@ -1175,6 +1188,9 @@ func (l *State) SyncChanges() error { defer l.Unlock() // Sync the node level info if we need to. + // At the start to guarantee sync even if services or checks fail, + // which is more likely because there are more syncs happening for them. + if l.nodeInfoInSync { l.logger.Debug("Node info in sync") } else { @@ -1183,10 +1199,6 @@ func (l *State) SyncChanges() error { } } - // We will do node-level info syncing at the end, since it will get - // updated by a service or check sync anyway, given how the register - // API works. - // Sync the services // (logging happens in the helper methods) for id, s := range l.services { diff --git a/agent/local/state_test.go b/agent/local/state_test.go index f4be09ba1..af8944309 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -374,8 +374,17 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { assert.Len(services.NodeServices.Services, 5) // All the services should match + vips := make(map[string]struct{}) + srv1.TaggedAddresses = nil + srv2.TaggedAddresses = nil for id, serv := range services.NodeServices.Services { serv.CreateIndex, serv.ModifyIndex = 0, 0 + if serv.TaggedAddresses != nil { + serviceVIP := serv.TaggedAddresses[structs.TaggedAddressVirtualIP].Address + assert.NotEmpty(serviceVIP) + vips[serviceVIP] = struct{}{} + } + serv.TaggedAddresses = nil switch id { case "mysql-proxy": assert.Equal(srv1, serv) @@ -392,6 +401,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { } } + assert.Len(vips, 4) assert.Nil(servicesInSync(a.State, 4, 
structs.DefaultEnterpriseMetaInDefaultPartition())) // Remove one of the services @@ -786,9 +796,17 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { t.Parallel() a := agent.NewTestAgent(t, ` - acl_datacenter = "dc1" - acl_master_token = "root" - acl_default_policy = "deny" `) + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } + `) defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") @@ -1231,9 +1249,17 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { t.Parallel() dc := "dc1" a := &agent.TestAgent{HCL: ` - acl_datacenter = "` + dc + `" - acl_master_token = "root" - acl_default_policy = "deny" `} + primary_datacenter = "` + dc + `" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } + `} if err := a.Start(t); err != nil { t.Fatal(err) } @@ -2042,6 +2068,7 @@ func TestAgent_sendCoordinate(t *testing.T) { } t.Parallel() + a := agent.StartTestAgent(t, agent.TestAgent{Overrides: ` sync_coordinate_interval_min = "1ms" sync_coordinate_rate_target = 10.0 diff --git a/agent/metadata/server.go b/agent/metadata/server.go index 6fdad57c8..3715032c6 100644 --- a/agent/metadata/server.go +++ b/agent/metadata/server.go @@ -32,6 +32,7 @@ type Server struct { SegmentAddrs map[string]string SegmentPorts map[string]int WanJoinPort int + LanJoinPort int Bootstrap bool Expect int Build version.Version @@ -168,6 +169,7 @@ func IsConsulServer(m serf.Member) (bool, *Server) { SegmentAddrs: segmentAddrs, SegmentPorts: segmentPorts, WanJoinPort: wanJoinPort, + LanJoinPort: int(m.Port), Bootstrap: bootstrap, Expect: expect, Addr: addr, diff --git a/agent/operator_endpoint.go b/agent/operator_endpoint.go index 644fe7fba..e43302aef 100644 --- a/agent/operator_endpoint.go +++ b/agent/operator_endpoint.go @@ -6,11 +6,12 @@ import ( "strconv" "time" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" 
multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" ) // OperatorRaftConfiguration is used to inspect the current Raft configuration. @@ -172,6 +173,11 @@ func keyringErrorsOrNil(responses []*structs.KeyringResponse) error { if response.WAN { pool = "WAN" } + if response.Segment != "" { + pool += " [segment: " + response.Segment + "]" + } else if !structs.IsDefaultPartition(response.Partition) { + pool += " [partition: " + response.Partition + "]" + } errs = multierror.Append(errs, fmt.Errorf("%s error: %s", pool, response.Error)) for key, message := range response.Messages { errs = multierror.Append(errs, fmt.Errorf("%s: %s", key, message)) diff --git a/agent/operator_endpoint_oss.go b/agent/operator_endpoint_oss.go index ff87332b6..9367cc367 100644 --- a/agent/operator_endpoint_oss.go +++ b/agent/operator_endpoint_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 79ceac6bc..985535d97 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -155,7 +155,7 @@ func makeUpstream(g *structs.GatewayService) structs.Upstream { upstream := structs.Upstream{ DestinationName: g.Service.Name, DestinationNamespace: g.Service.NamespaceOrDefault(), - DestinationPartition: g.Gateway.PartitionOrDefault(), + DestinationPartition: g.Service.PartitionOrDefault(), LocalBindPort: g.Port, IngressHosts: g.Hosts, // Pass the protocol that was configured on the ingress listener in order @@ -232,6 +232,7 @@ func (s *handlerIngressGateway) generateIngressDNSSANs(snap *ConfigSnapshot) []s } } + // TODO(partitions): How should these be updated for partitions? for ns := range namespaces { // The default namespace is special cased in DNS resolution, so special // case it here. 
diff --git a/agent/proxycfg/mesh_gateway_oss.go b/agent/proxycfg/mesh_gateway_oss.go index e1b2113c9..b32884452 100644 --- a/agent/proxycfg/mesh_gateway_oss.go +++ b/agent/proxycfg/mesh_gateway_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package proxycfg diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index cd3000353..237279b99 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -114,11 +114,12 @@ func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error for idx := range proxyCfg.Upstreams { us := &proxyCfg.Upstreams[idx] if us.DestinationType != structs.UpstreamDestTypePreparedQuery && us.DestinationNamespace == "" { - // default the upstreams target namespace to the namespace of the proxy + // default the upstreams target namespace and partition to those of the proxy // doing this here prevents needing much more complex logic a bunch of other // places and makes tracking these upstreams simpler as we can dedup them // with the maps tracking upstream ids being watched. 
proxyCfg.Upstreams[idx].DestinationNamespace = ns.EnterpriseMeta.NamespaceOrDefault() + proxyCfg.Upstreams[idx].DestinationPartition = ns.EnterpriseMeta.PartitionOrDefault() } } diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index b4d1fc202..b172fdee8 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -1686,6 +1686,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { db.String(): { DestinationName: "db", DestinationNamespace: structs.IntentionDefaultNamespace, + DestinationPartition: structs.IntentionDefaultNamespace, }, } require.Equal(t, expectUpstreams, snap.ConnectProxy.UpstreamConfig) diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index 33444598b..110762376 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io/ioutil" + "math" "path" "path/filepath" "sync" @@ -1506,6 +1507,56 @@ func TestConfigSnapshotMeshGatewayUsingFederationStates(t testing.T) *ConfigSnap return testConfigSnapshotMeshGateway(t, true, true) } +func TestConfigSnapshotMeshGatewayNewerInformationInFederationStates(t testing.T) *ConfigSnapshot { + snap := TestConfigSnapshotMeshGateway(t) + + // Create a duplicate entry in FedStateGateways, with a high ModifyIndex, to + // verify that fresh data in the federation state is preferred over stale data + // in GatewayGroups. 
+ svc := structs.TestNodeServiceMeshGatewayWithAddrs(t, + "10.0.1.3", 8443, + structs.ServiceAddress{Address: "10.0.1.3", Port: 8443}, + structs.ServiceAddress{Address: "198.18.1.3", Port: 443}, + ) + svc.RaftIndex.ModifyIndex = math.MaxUint64 + + snap.MeshGateway.FedStateGateways = map[string]structs.CheckServiceNodes{ + "dc2": { + { + Node: snap.MeshGateway.GatewayGroups["dc2"][0].Node, + Service: svc, + }, + }, + } + + return snap +} + +func TestConfigSnapshotMeshGatewayOlderInformationInFederationStates(t testing.T) *ConfigSnapshot { + snap := TestConfigSnapshotMeshGateway(t) + + // Create a duplicate entry in FedStateGateways, with a low ModifyIndex, to + // verify that stale data in the federation state is ignored in favor of the + // fresher data in GatewayGroups. + svc := structs.TestNodeServiceMeshGatewayWithAddrs(t, + "10.0.1.3", 8443, + structs.ServiceAddress{Address: "10.0.1.3", Port: 8443}, + structs.ServiceAddress{Address: "198.18.1.3", Port: 443}, + ) + svc.RaftIndex.ModifyIndex = 0 + + snap.MeshGateway.FedStateGateways = map[string]structs.CheckServiceNodes{ + "dc2": { + { + Node: snap.MeshGateway.GatewayGroups["dc2"][0].Node, + Service: svc, + }, + }, + } + + return snap +} + func TestConfigSnapshotMeshGatewayNoServices(t testing.T) *ConfigSnapshot { return testConfigSnapshotMeshGateway(t, false, false) } diff --git a/agent/remote_exec_test.go b/agent/remote_exec_test.go index fad3a1a9d..dc6489fa5 100644 --- a/agent/remote_exec_test.go +++ b/agent/remote_exec_test.go @@ -117,10 +117,17 @@ func TestRemoteExecGetSpec_ACLToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecGetSpec(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + default = "root" + } + } `, "root", true, dc) } @@ -132,10 +139,17 @@ func TestRemoteExecGetSpec_ACLAgentToken(t *testing.T) { 
t.Parallel() dc := "dc1" testRemoteExecGetSpec(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_agent_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "root" + } + } `, "root", true, dc) } @@ -147,9 +161,16 @@ func TestRemoteExecGetSpec_ACLDeny(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecGetSpec(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } `, "root", false, dc) } @@ -207,10 +228,17 @@ func TestRemoteExecWrites_ACLToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecWrites(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + default = "root" + } + } `, "root", true, dc) } @@ -222,10 +250,17 @@ func TestRemoteExecWrites_ACLAgentToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecWrites(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_agent_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "root" + } + } `, "root", true, dc) } @@ -237,9 +272,16 @@ func TestRemoteExecWrites_ACLDeny(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecWrites(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } `, "root", false, dc) } diff --git a/agent/router/grpc.go b/agent/router/grpc.go index c4fe96d25..44600d42a 100644 
--- a/agent/router/grpc.go +++ b/agent/router/grpc.go @@ -1,13 +1,16 @@ package router -import "github.com/hashicorp/consul/agent/metadata" +import ( + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" +) // ServerTracker is called when Router is notified of a server being added or // removed. type ServerTracker interface { NewRebalancer(dc string) func() - AddServer(*metadata.Server) - RemoveServer(*metadata.Server) + AddServer(types.AreaID, *metadata.Server) + RemoveServer(types.AreaID, *metadata.Server) } // Rebalancer is called periodically to re-order the servers so that the load on the @@ -24,7 +27,7 @@ func (NoOpServerTracker) NewRebalancer(string) func() { } // AddServer does nothing -func (NoOpServerTracker) AddServer(*metadata.Server) {} +func (NoOpServerTracker) AddServer(types.AreaID, *metadata.Server) {} // RemoveServer does nothing -func (NoOpServerTracker) RemoveServer(*metadata.Server) {} +func (NoOpServerTracker) RemoveServer(types.AreaID, *metadata.Server) {} diff --git a/agent/router/router.go b/agent/router/router.go index 9aaae8739..1389a30f6 100644 --- a/agent/router/router.go +++ b/agent/router/router.go @@ -175,7 +175,7 @@ func (r *Router) AddArea(areaID types.AreaID, cluster RouterSerfCluster, pinger continue } - if err := r.addServer(area, parts); err != nil { + if err := r.addServer(areaID, area, parts); err != nil { return fmt.Errorf("failed to add server %q to area %q: %v", m.Name, areaID, err) } } @@ -276,7 +276,7 @@ func (r *Router) maybeInitializeManager(area *areaInfo, dc string) *Manager { } // addServer does the work of AddServer once the write lock is held. -func (r *Router) addServer(area *areaInfo, s *metadata.Server) error { +func (r *Router) addServer(areaID types.AreaID, area *areaInfo, s *metadata.Server) error { // Make the manager on the fly if this is the first we've seen of it, // and add it to the index. 
manager := r.maybeInitializeManager(area, s.Datacenter) @@ -288,7 +288,7 @@ func (r *Router) addServer(area *areaInfo, s *metadata.Server) error { } manager.AddServer(s) - r.grpcServerTracker.AddServer(s) + r.grpcServerTracker.AddServer(areaID, s) return nil } @@ -302,7 +302,7 @@ func (r *Router) AddServer(areaID types.AreaID, s *metadata.Server) error { if !ok { return fmt.Errorf("area ID %q does not exist", areaID) } - return r.addServer(area, s) + return r.addServer(areaID, area, s) } // RemoveServer should be called whenever a server is removed from an area. This @@ -324,7 +324,7 @@ func (r *Router) RemoveServer(areaID types.AreaID, s *metadata.Server) error { return nil } info.manager.RemoveServer(s) - r.grpcServerTracker.RemoveServer(s) + r.grpcServerTracker.RemoveServer(areaID, s) // If this manager is empty then remove it so we don't accumulate cruft // and waste time during request routing. diff --git a/agent/router/serf_adapter.go b/agent/router/serf_adapter.go index b051b2f96..7208fe123 100644 --- a/agent/router/serf_adapter.go +++ b/agent/router/serf_adapter.go @@ -1,10 +1,11 @@ package router import ( - "github.com/hashicorp/consul/agent/metadata" - "github.com/hashicorp/consul/types" "github.com/hashicorp/go-hclog" "github.com/hashicorp/serf/serf" + + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" ) // routerFn selects one of the router operations to map to incoming Serf events. @@ -50,7 +51,18 @@ func handleMemberEvent(logger hclog.Logger, fn routerFn, areaID types.AreaID, e // HandleSerfEvents is a long-running goroutine that pushes incoming events from // a Serf manager's channel into the given router. This will return when the // shutdown channel is closed. -func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, shutdownCh <-chan struct{}, eventCh <-chan serf.Event) { +// +// If membershipNotifyCh is non-nil, it must be a buffered channel of size one +// with one consumer. 
That consumer will be notified when +// Join/Leave/Failed/Update occur on this serf pool. +func HandleSerfEvents( + logger hclog.Logger, + router *Router, + areaID types.AreaID, + shutdownCh <-chan struct{}, + eventCh <-chan serf.Event, + membershipNotifyCh chan<- struct{}, +) { for { select { case <-shutdownCh: @@ -60,15 +72,19 @@ func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, switch e.EventType() { case serf.EventMemberJoin: handleMemberEvent(logger, router.AddServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) case serf.EventMemberLeave, serf.EventMemberReap: handleMemberEvent(logger, router.RemoveServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) case serf.EventMemberFailed: handleMemberEvent(logger, router.FailServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) case serf.EventMemberUpdate: handleMemberEvent(logger, router.AddServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) // All of these event types are ignored. case serf.EventUser: @@ -80,3 +96,15 @@ func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, } } } + +func notifyMembershipPossibleChange(membershipNotifyCh chan<- struct{}) { + if membershipNotifyCh == nil { + return + } + + // Notify if not already notified. 
+ select { + case membershipNotifyCh <- struct{}{}: + default: + } +} diff --git a/agent/routine-leak-checker/leak_test.go b/agent/routine-leak-checker/leak_test.go index 7b8de34c7..fd64e9c05 100644 --- a/agent/routine-leak-checker/leak_test.go +++ b/agent/routine-leak-checker/leak_test.go @@ -55,7 +55,7 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent { require.NoError(t, ioutil.WriteFile(keyPath, []byte(keyPEM), 0600)) require.NoError(t, ioutil.WriteFile(caPath, []byte(caPEM), 0600)) - aclParams := agent.DefaulTestACLConfigParams() + aclParams := agent.DefaultTestACLConfigParams() aclParams.PrimaryDatacenter = "primary" aclParams.EnableTokenReplication = true @@ -76,7 +76,7 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent { a := agent.NewTestAgent(t, config) t.Cleanup(func() { a.Shutdown() }) - testrpc.WaitForTestAgent(t, a.RPC, "primary", testrpc.WithToken(agent.TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, a.RPC, "primary", testrpc.WithToken(agent.TestDefaultInitialManagementToken)) return a } diff --git a/agent/setup_oss.go b/agent/setup_oss.go index a0a23c19c..9fd22210e 100644 --- a/agent/setup_oss.go +++ b/agent/setup_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/signal_unix.go b/agent/signal_unix.go index 2768a5588..a41e16a67 100644 --- a/agent/signal_unix.go +++ b/agent/signal_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package agent diff --git a/agent/signal_windows.go b/agent/signal_windows.go index 3e5b8d724..ba3d0fde9 100644 --- a/agent/signal_windows.go +++ b/agent/signal_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package agent diff --git a/agent/structs/acl_oss.go b/agent/structs/acl_oss.go index d12201033..3a1457aad 100644 --- a/agent/structs/acl_oss.go +++ b/agent/structs/acl_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs @@ -93,3 +94,7 @@ func (r *ACLRole) NodeIdentityList() 
[]*ACLNodeIdentity { } return out } + +func IsValidPartitionAndDatacenter(meta EnterpriseMeta, datacenters []string, primaryDatacenter string) bool { + return true +} diff --git a/agent/structs/autopilot_oss.go b/agent/structs/autopilot_oss.go index 56f6ee7e8..a495c26a0 100644 --- a/agent/structs/autopilot_oss.go +++ b/agent/structs/autopilot_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/catalog_oss.go b/agent/structs/catalog_oss.go index 3c02d1ebc..8165ffc42 100644 --- a/agent/structs/catalog_oss.go +++ b/agent/structs/catalog_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index a7703c45a..3ea7c18f3 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -27,7 +27,7 @@ const ( TerminatingGateway string = "terminating-gateway" ServiceIntentions string = "service-intentions" MeshConfig string = "mesh" - PartitionExports string = "partition-exports" + ExportedServices string = "exported-services" ProxyConfigGlobal string = "global" MeshConfigMesh string = "mesh" @@ -45,7 +45,7 @@ var AllConfigEntryKinds = []string{ TerminatingGateway, ServiceIntentions, MeshConfig, - PartitionExports, + ExportedServices, } // ConfigEntry is the interface for centralized configuration stored in Raft. 
@@ -533,8 +533,8 @@ func MakeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceIntentionsConfigEntry{Name: name}, nil case MeshConfig: return &MeshConfigEntry{}, nil - case PartitionExports: - return &PartitionExportsConfigEntry{Name: name}, nil + case ExportedServices: + return &ExportedServicesConfigEntry{Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 35287f442..27980a4d1 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -119,6 +119,9 @@ func (e *ServiceRouterConfigEntry) Normalize() error { if route.Destination != nil && route.Destination.Namespace == "" { route.Destination.Namespace = e.EnterpriseMeta.NamespaceOrEmpty() } + if route.Destination != nil && route.Destination.Partition == "" { + route.Destination.Partition = e.EnterpriseMeta.PartitionOrEmpty() + } } return nil @@ -381,6 +384,13 @@ type ServiceRouteDestination struct { // splitting. Namespace string `json:",omitempty"` + // Partition is the partition to resolve the service from instead of the + // current partition. If empty the current partition is assumed. + // + // If this field is specified then this route is ineligible for further + // splitting. + Partition string `json:",omitempty"` + // PrefixRewrite allows for the proxied request to have its matching path // prefix modified before being sent to the destination. Described more // below in the envoy implementation section. 
@@ -557,8 +567,8 @@ func (e *ServiceSplitterConfigEntry) Validate() error { } if _, ok := found[splitKey]; ok { return fmt.Errorf( - "split destination occurs more than once: service=%q, subset=%q, namespace=%q", - splitKey.Service, splitKey.ServiceSubset, splitKey.Namespace, + "split destination occurs more than once: service=%q, subset=%q, namespace=%q, partition=%q", + splitKey.Service, splitKey.ServiceSubset, splitKey.Namespace, splitKey.Partition, ) } found[splitKey] = struct{}{} @@ -665,7 +675,12 @@ type ServiceSplit struct { // splitting. Namespace string `json:",omitempty"` - // NOTE: Partition is not represented here by design. Do not add it. + // Partition is the partition to resolve the service from instead of the + // current partition. If empty the current partition is assumed (optional). + // + // If this field is specified then this route is ineligible for further + // splitting. + Partition string `json:",omitempty"` // NOTE: Any configuration added to Splits that needs to be passed to the // proxy needs special handling MergeParent below. @@ -930,9 +945,13 @@ func (e *ServiceResolverConfigEntry) Validate() error { } if e.Redirect != nil { - if e.PartitionOrEmpty() != acl.DefaultPartitionName && e.Redirect.Datacenter != "" { - return fmt.Errorf("Cross datacenters redirect is not allowed for non default partition") + if !e.InDefaultPartition() && e.Redirect.Datacenter != "" { + return fmt.Errorf("Cross-datacenter redirect is only supported in the default partition") } + if PartitionOrDefault(e.Redirect.Partition) != e.PartitionOrDefault() && e.Redirect.Datacenter != "" { + return fmt.Errorf("Cross-datacenter and cross-partition redirect is not supported") + } + r := e.Redirect if len(e.Failover) > 0 { @@ -941,7 +960,7 @@ func (e *ServiceResolverConfigEntry) Validate() error { // TODO(rb): prevent subsets and default subsets from being defined? 
- if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Datacenter == "" { + if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" { return fmt.Errorf("Redirect is empty") } @@ -952,6 +971,9 @@ func (e *ServiceResolverConfigEntry) Validate() error { if r.Namespace != "" { return fmt.Errorf("Redirect.Namespace defined without Redirect.Service") } + if r.Partition != "" { + return fmt.Errorf("Redirect.Partition defined without Redirect.Service") + } } else if r.Service == e.Name { if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) { return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service) @@ -962,9 +984,10 @@ func (e *ServiceResolverConfigEntry) Validate() error { if len(e.Failover) > 0 { for subset, f := range e.Failover { - if e.PartitionOrEmpty() != acl.DefaultPartitionName && len(f.Datacenters) != 0 { - return fmt.Errorf("Cross datacenters failover is not allowed for non default partition") + if !e.InDefaultPartition() && len(f.Datacenters) != 0 { + return fmt.Errorf("Cross-datacenter failover is only supported in the default partition") } + if subset != "*" && !isSubset(subset) { return fmt.Errorf("Bad Failover[%q]: not a valid subset", subset) } @@ -1141,6 +1164,10 @@ type ServiceResolverRedirect struct { // current one (optional). Namespace string `json:",omitempty"` + // Partition is the partition to resolve the service from instead of the + // current one (optional). + Partition string `json:",omitempty"` + // Datacenter is the datacenter to resolve the service from instead of the // current one (optional). Datacenter string `json:",omitempty"` @@ -1309,19 +1336,20 @@ func canWriteDiscoveryChain(entry discoveryChainConfigEntry, authz acl.Authorize // DiscoveryChainConfigEntries wraps just the raw cross-referenced config // entries. None of these are defaulted. 
type DiscoveryChainConfigEntries struct { - Routers map[ServiceID]*ServiceRouterConfigEntry - Splitters map[ServiceID]*ServiceSplitterConfigEntry - Resolvers map[ServiceID]*ServiceResolverConfigEntry - Services map[ServiceID]*ServiceConfigEntry - GlobalProxy *ProxyConfigEntry + Routers map[ServiceID]*ServiceRouterConfigEntry + Splitters map[ServiceID]*ServiceSplitterConfigEntry + Resolvers map[ServiceID]*ServiceResolverConfigEntry + Services map[ServiceID]*ServiceConfigEntry + ProxyDefaults map[string]*ProxyConfigEntry } func NewDiscoveryChainConfigEntries() *DiscoveryChainConfigEntries { return &DiscoveryChainConfigEntries{ - Routers: make(map[ServiceID]*ServiceRouterConfigEntry), - Splitters: make(map[ServiceID]*ServiceSplitterConfigEntry), - Resolvers: make(map[ServiceID]*ServiceResolverConfigEntry), - Services: make(map[ServiceID]*ServiceConfigEntry), + Routers: make(map[ServiceID]*ServiceRouterConfigEntry), + Splitters: make(map[ServiceID]*ServiceSplitterConfigEntry), + Resolvers: make(map[ServiceID]*ServiceResolverConfigEntry), + Services: make(map[ServiceID]*ServiceConfigEntry), + ProxyDefaults: make(map[string]*ProxyConfigEntry), } } @@ -1353,6 +1381,13 @@ func (e *DiscoveryChainConfigEntries) GetService(sid ServiceID) *ServiceConfigEn return nil } +func (e *DiscoveryChainConfigEntries) GetProxyDefaults(partition string) *ProxyConfigEntry { + if e.ProxyDefaults != nil { + return e.ProxyDefaults[partition] + } + return nil +} + // AddRouters adds router configs. Convenience function for testing. func (e *DiscoveryChainConfigEntries) AddRouters(entries ...*ServiceRouterConfigEntry) { if e.Routers == nil { @@ -1393,6 +1428,16 @@ func (e *DiscoveryChainConfigEntries) AddServices(entries ...*ServiceConfigEntry } } +// AddProxyDefaults adds proxy-defaults configs. Convenience function for testing. 
+func (e *DiscoveryChainConfigEntries) AddProxyDefaults(entries ...*ProxyConfigEntry) { + if e.ProxyDefaults == nil { + e.ProxyDefaults = make(map[string]*ProxyConfigEntry) + } + for _, entry := range entries { + e.ProxyDefaults[entry.PartitionOrDefault()] = entry + } +} + // AddEntries adds generic configs. Convenience function for testing. Panics on // operator error. func (e *DiscoveryChainConfigEntries) AddEntries(entries ...ConfigEntry) { @@ -1410,7 +1455,7 @@ func (e *DiscoveryChainConfigEntries) AddEntries(entries ...ConfigEntry) { if entry.GetName() != ProxyConfigGlobal { panic("the only supported proxy-defaults name is '" + ProxyConfigGlobal + "'") } - e.GlobalProxy = entry.(*ProxyConfigEntry) + e.AddProxyDefaults(entry.(*ProxyConfigEntry)) default: panic("unhandled config entry kind: " + entry.GetKind()) } @@ -1418,7 +1463,7 @@ func (e *DiscoveryChainConfigEntries) AddEntries(entries ...ConfigEntry) { } func (e *DiscoveryChainConfigEntries) IsEmpty() bool { - return e.IsChainEmpty() && len(e.Services) == 0 && e.GlobalProxy == nil + return e.IsChainEmpty() && len(e.Services) == 0 && len(e.ProxyDefaults) == 0 } func (e *DiscoveryChainConfigEntries) IsChainEmpty() bool { diff --git a/agent/structs/config_entry_discoverychain_oss.go b/agent/structs/config_entry_discoverychain_oss.go index 17f8b8473..cd22c9686 100644 --- a/agent/structs/config_entry_discoverychain_oss.go +++ b/agent/structs/config_entry_discoverychain_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index 044f9d62a..4f11a4b7b 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -7,9 +7,9 @@ import ( "github.com/hashicorp/consul/acl" ) -// PartitionExportsConfigEntry is the top-level struct for exporting a service to be exposed +// ExportedServicesConfigEntry is the top-level struct for exporting a service to be exposed // 
across other admin partitions. -type PartitionExportsConfigEntry struct { +type ExportedServicesConfigEntry struct { Name string // Services is a list of services to be exported and the list of partitions @@ -40,7 +40,7 @@ type ServiceConsumer struct { Partition string } -func (e *PartitionExportsConfigEntry) ToMap() map[string]map[string][]string { +func (e *ExportedServicesConfigEntry) ToMap() map[string]map[string][]string { resp := make(map[string]map[string][]string) for _, svc := range e.Services { if _, ok := resp[svc.Namespace]; !ok { @@ -57,7 +57,7 @@ func (e *PartitionExportsConfigEntry) ToMap() map[string]map[string][]string { return resp } -func (e *PartitionExportsConfigEntry) Clone() *PartitionExportsConfigEntry { +func (e *ExportedServicesConfigEntry) Clone() *ExportedServicesConfigEntry { e2 := *e e2.Services = make([]ExportedService, len(e.Services)) for _, svc := range e.Services { @@ -72,11 +72,11 @@ func (e *PartitionExportsConfigEntry) Clone() *PartitionExportsConfigEntry { return &e2 } -func (e *PartitionExportsConfigEntry) GetKind() string { - return PartitionExports +func (e *ExportedServicesConfigEntry) GetKind() string { + return ExportedServices } -func (e *PartitionExportsConfigEntry) GetName() string { +func (e *ExportedServicesConfigEntry) GetName() string { if e == nil { return "" } @@ -84,14 +84,14 @@ func (e *PartitionExportsConfigEntry) GetName() string { return e.Name } -func (e *PartitionExportsConfigEntry) GetMeta() map[string]string { +func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { if e == nil { return nil } return e.Meta } -func (e *PartitionExportsConfigEntry) Normalize() error { +func (e *ExportedServicesConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") } @@ -105,15 +105,20 @@ func (e *PartitionExportsConfigEntry) Normalize() error { return nil } -func (e *PartitionExportsConfigEntry) Validate() error { +func (e *ExportedServicesConfigEntry) Validate() error { if 
e.Name == "" { return fmt.Errorf("Name is required") } if e.Name == WildcardSpecifier { - return fmt.Errorf("partition-exports Name must be the name of a partition, and not a wildcard") + return fmt.Errorf("exported-services Name must be the name of a partition, and not a wildcard") } - validationErr := validateConfigEntryMeta(e.Meta) + if err := requireEnterprise(e.GetKind()); err != nil { + return err + } + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } for _, svc := range e.Services { if svc.Name == "" { @@ -128,23 +133,22 @@ func (e *PartitionExportsConfigEntry) Validate() error { } } } - - return validationErr + return nil } -func (e *PartitionExportsConfigEntry) CanRead(authz acl.Authorizer) bool { +func (e *ExportedServicesConfigEntry) CanRead(authz acl.Authorizer) bool { var authzContext acl.AuthorizerContext e.FillAuthzContext(&authzContext) return authz.MeshRead(&authzContext) == acl.Allow } -func (e *PartitionExportsConfigEntry) CanWrite(authz acl.Authorizer) bool { +func (e *ExportedServicesConfigEntry) CanWrite(authz acl.Authorizer) bool { var authzContext acl.AuthorizerContext e.FillAuthzContext(&authzContext) return authz.MeshWrite(&authzContext) == acl.Allow } -func (e *PartitionExportsConfigEntry) GetRaftIndex() *RaftIndex { +func (e *ExportedServicesConfigEntry) GetRaftIndex() *RaftIndex { if e == nil { return &RaftIndex{} } @@ -152,7 +156,7 @@ func (e *PartitionExportsConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *PartitionExportsConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ExportedServicesConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { if e == nil { return nil } @@ -164,13 +168,13 @@ func (e *PartitionExportsConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { // correct type. // This method is implemented on the structs type (as apposed to the api type) // because that is what the API currently uses to return a response. 
-func (e *PartitionExportsConfigEntry) MarshalJSON() ([]byte, error) { - type Alias PartitionExportsConfigEntry +func (e *ExportedServicesConfigEntry) MarshalJSON() ([]byte, error) { + type Alias ExportedServicesConfigEntry source := &struct { Kind string *Alias }{ - Kind: PartitionExports, + Kind: ExportedServices, Alias: (*Alias)(e), } return json.Marshal(source) diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index aebb0a0f3..bad63294a 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -266,10 +266,7 @@ func (e *IngressGatewayConfigEntry) Validate() error { declaredHosts := make(map[string]bool) serviceNames := make(map[ServiceID]struct{}) - for i, s := range listener.Services { - if err := validateInnerEnterpriseMeta(&s.EnterpriseMeta, &e.EnterpriseMeta); err != nil { - return fmt.Errorf("services[%d]: %w", i, err) - } + for _, s := range listener.Services { sn := NewServiceName(s.Name, &s.EnterpriseMeta) if err := s.RequestHeaders.Validate(listener.Protocol); err != nil { return fmt.Errorf("request headers %s (service %q on listener on port %d)", err, sn.String(), listener.Port) diff --git a/agent/structs/config_entry_intentions_oss.go b/agent/structs/config_entry_intentions_oss.go index 90a113a43..d2edadd27 100644 --- a/agent/structs/config_entry_intentions_oss.go +++ b/agent/structs/config_entry_intentions_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/config_entry_mesh_oss.go b/agent/structs/config_entry_mesh_oss.go index 8826c5181..087edc8e5 100644 --- a/agent/structs/config_entry_mesh_oss.go +++ b/agent/structs/config_entry_mesh_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/config_entry_oss.go b/agent/structs/config_entry_oss.go index 133d697c1..f7ccac38c 100644 --- a/agent/structs/config_entry_oss.go +++ 
b/agent/structs/config_entry_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs @@ -34,3 +35,7 @@ func validateUnusedKeys(unused []string) error { func validateInnerEnterpriseMeta(_, _ *EnterpriseMeta) error { return nil } + +func requireEnterprise(kind string) error { + return fmt.Errorf("Config entry kind %q requires Consul Enterprise", kind) +} diff --git a/agent/structs/config_entry_oss_test.go b/agent/structs/config_entry_oss_test.go index 8e2cc13e7..fbf0a9ebd 100644 --- a/agent/structs/config_entry_oss_test.go +++ b/agent/structs/config_entry_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index 294c9e40b..febe75012 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -1665,9 +1665,9 @@ func TestDecodeConfigEntry(t *testing.T) { }, }, { - name: "partition-exports", + name: "exported-services", snake: ` - kind = "partition-exports" + kind = "exported-services" name = "foo" meta { "foo" = "bar" @@ -1698,7 +1698,7 @@ func TestDecodeConfigEntry(t *testing.T) { ] `, camel: ` - Kind = "partition-exports" + Kind = "exported-services" Name = "foo" Meta { "foo" = "bar" @@ -1728,7 +1728,7 @@ func TestDecodeConfigEntry(t *testing.T) { } ] `, - expect: &PartitionExportsConfigEntry{ + expect: &ExportedServicesConfigEntry{ Name: "foo", Meta: map[string]string{ "foo": "bar", diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index e7e9822bc..9da766d75 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -86,11 +86,20 @@ type CARoot struct { NotBefore time.Time NotAfter time.Time - // RootCert is the PEM-encoded public certificate. + // RootCert is the PEM-encoded public certificate for the root CA. The + // certificate is the same for all federated clusters. 
RootCert string // IntermediateCerts is a list of PEM-encoded intermediate certs to - // attach to any leaf certs signed by this CA. + // attach to any leaf certs signed by this CA. The list may include a + // certificate cross-signed by an old root CA, any subordinate CAs below the + // root CA, and the intermediate CA used to sign leaf certificates in the + // local Datacenter. + // + // If the provider which created this root uses an intermediate to sign + // leaf certificates (Vault provider), or this is a secondary Datacenter then + // the intermediate used to sign leaf certificates will be the last in the + // list. IntermediateCerts []string // SigningCert is the PEM-encoded signing certificate and SigningKey @@ -481,6 +490,14 @@ type VaultCAProviderConfig struct { KeyFile string TLSServerName string TLSSkipVerify bool + + AuthMethod *VaultAuthMethod `alias:"auth_method"` +} + +type VaultAuthMethod struct { + Type string + MountPath string `alias:"mount_path"` + Params map[string]interface{} } type AWSCAProviderConfig struct { diff --git a/agent/structs/connect_oss.go b/agent/structs/connect_oss.go index 30e168104..ac4a6405f 100644 --- a/agent/structs/connect_oss.go +++ b/agent/structs/connect_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/connect_proxy_config_oss.go b/agent/structs/connect_proxy_config_oss.go index d930420fa..21dead1e7 100644 --- a/agent/structs/connect_proxy_config_oss.go +++ b/agent/structs/connect_proxy_config_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/discovery_chain_oss.go b/agent/structs/discovery_chain_oss.go index f8c44ac99..3b7f091c5 100644 --- a/agent/structs/discovery_chain_oss.go +++ b/agent/structs/discovery_chain_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/intention_oss.go b/agent/structs/intention_oss.go index 0c445b0a8..11a57e07d 
100644 --- a/agent/structs/intention_oss.go +++ b/agent/structs/intention_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/protobuf_compat.go b/agent/structs/protobuf_compat.go index 667443c9e..090190e62 100644 --- a/agent/structs/protobuf_compat.go +++ b/agent/structs/protobuf_compat.go @@ -45,6 +45,8 @@ type QueryMetaCompat interface { GetConsistencyLevel() string SetConsistencyLevel(string) GetBackend() QueryBackend + GetResultsFilteredByACLs() bool + SetResultsFilteredByACLs(bool) } // GetToken helps implement the QueryOptionsCompat interface @@ -274,3 +276,15 @@ func (q *QueryMeta) SetConsistencyLevel(consistencyLevel string) { func (q *QueryMeta) GetBackend() QueryBackend { return q.Backend } + +// GetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat +// interface. +func (q *QueryMeta) GetResultsFilteredByACLs() bool { + return q.ResultsFilteredByACLs +} + +// SetResultsFilteredByACLs is needed to implement the structs.QueryMetaCompat +// interface. 
+func (q *QueryMeta) SetResultsFilteredByACLs(v bool) { + q.ResultsFilteredByACLs = v +} diff --git a/agent/structs/sanitize_oss.go b/agent/structs/sanitize_oss.go index 4e0c0b99f..2834ebf59 100644 --- a/agent/structs/sanitize_oss.go +++ b/agent/structs/sanitize_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/structs.go b/agent/structs/structs.go index aeb3d8e44..cd7733b9b 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -70,6 +70,9 @@ const ( ChunkingStateType = 29 FederationStateRequestType = 30 SystemMetadataRequestType = 31 + ServiceVirtualIPRequestType = 32 + FreeVirtualIPRequestType = 33 + KindServiceNamesType = 34 ) // if a new request type is added above it must be @@ -110,6 +113,9 @@ var requestTypeStrings = map[MessageType]string{ ChunkingStateType: "ChunkingState", FederationStateRequestType: "FederationState", SystemMetadataRequestType: "SystemMetadata", + ServiceVirtualIPRequestType: "ServiceVirtualIP", + FreeVirtualIPRequestType: "FreeVirtualIP", + KindServiceNamesType: "KindServiceName", } const ( @@ -127,7 +133,7 @@ const ( ServiceMaintPrefix = "_service_maintenance:" // The meta key prefix reserved for Consul's internal use - metaKeyReservedPrefix = "consul-" + MetaKeyReservedPrefix = "consul-" // metaMaxKeyPairs is maximum number of metadata key pairs allowed to be registered metaMaxKeyPairs = 64 @@ -148,6 +154,9 @@ const ( // MetaExternalSource is the metadata key used when a resource is managed by a source outside Consul like nomad/k8s MetaExternalSource = "external-source" + // TaggedAddressVirtualIP is the key used to store tagged virtual IPs generated by Consul. + TaggedAddressVirtualIP = "consul-virtual" + // MaxLockDelay provides a maximum LockDelay value for // a session. Any value above this will not be respected. 
MaxLockDelay = 60 * time.Second @@ -385,6 +394,11 @@ type QueryMeta struct { // Backend used to handle this query, either blocking-query or streaming. Backend QueryBackend + + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. + ResultsFilteredByACLs bool } // RegisterRequest is used for the Catalog.Register endpoint @@ -449,9 +463,11 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool { return false } -// DeregisterRequest is used for the Catalog.Deregister endpoint -// to deregister a node as providing a service. If no service is -// provided the entire node is deregistered. +// DeregisterRequest is used for the Catalog.Deregister endpoint to +// deregister a service, check, or node (only one should be provided). +// If ServiceID or CheckID are not provided, the entire node is deregistered. +// If a ServiceID is provided, any associated Checks with that service +// are also deregistered. type DeregisterRequest struct { Datacenter string Node string @@ -847,9 +863,9 @@ func validateMetaPair(key, value string, allowConsulPrefix bool, allowedConsulKe if len(key) > metaKeyMaxLength { return fmt.Errorf("Key is too long (limit: %d characters)", metaKeyMaxLength) } - if strings.HasPrefix(key, metaKeyReservedPrefix) { + if strings.HasPrefix(key, MetaKeyReservedPrefix) { if _, ok := allowedConsulKeys[key]; !allowConsulPrefix && !ok { - return fmt.Errorf("Key prefix '%s' is reserved for internal use", metaKeyReservedPrefix) + return fmt.Errorf("Key prefix '%s' is reserved for internal use", MetaKeyReservedPrefix) } } if len(value) > metaValueMaxLength { @@ -1017,6 +1033,13 @@ type ServiceNodes []*ServiceNode // ServiceKind is the kind of service being registered. 
type ServiceKind string +func (k ServiceKind) Normalized() string { + if k == ServiceKindTypical { + return "typical" + } + return string(k) +} + const ( // ServiceKindTypical is a typical, classic Consul service. This is // represented by the absence of a value. This was chosen for ease of @@ -1467,11 +1490,15 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode { } } +// NodeServices represents services provided by Node. +// Services is a map of service IDs to services. type NodeServices struct { Node *Node Services map[string]*NodeService } +// NodeServiceList represents services provided by Node. +// Services is a list of services. type NodeServiceList struct { Node *Node Services []*NodeService @@ -1865,6 +1892,18 @@ type CheckID struct { EnterpriseMeta } +// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (c CheckID) NamespaceOrDefault() string { + return c.EnterpriseMeta.NamespaceOrDefault() +} + +// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (c CheckID) PartitionOrDefault() string { + return c.EnterpriseMeta.PartitionOrDefault() +} + func NewCheckID(id types.CheckID, entMeta *EnterpriseMeta) CheckID { var cid CheckID cid.ID = id @@ -2233,6 +2272,11 @@ func (d *DirEntry) Equal(o *DirEntry) bool { d.Session == o.Session } +// IDValue implements the state.singleValueID interface for indexing. +func (d *DirEntry) IDValue() string { + return d.Key +} + type DirEntries []*DirEntry // KVSRequest is used to operate on the Key-Value store @@ -2320,6 +2364,11 @@ type ServiceCheck struct { Namespace string } +// IDValue implements the state.singleValueID interface for indexing. 
+func (s *Session) IDValue() string { + return s.ID +} + func (s *Session) UnmarshalJSON(data []byte) (err error) { type Alias Session aux := &struct { @@ -2581,6 +2630,7 @@ type KeyringResponse struct { WAN bool Datacenter string Segment string + Partition string `json:",omitempty"` Messages map[string]string `json:",omitempty"` Keys map[string]int PrimaryKeys map[string]int @@ -2588,6 +2638,10 @@ type KeyringResponse struct { Error string `json:",omitempty"` } +func (r *KeyringResponse) PartitionOrDefault() string { + return PartitionOrDefault(r.Partition) +} + // KeyringResponses holds multiple responses to keyring queries. Each // datacenter replies independently, and KeyringResponses is used as a // container for the set of all responses. diff --git a/agent/structs/structs_oss.go b/agent/structs/structs_oss.go index aa43c8fab..a003adc85 100644 --- a/agent/structs/structs_oss.go +++ b/agent/structs/structs_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/structs_oss_test.go b/agent/structs/structs_oss_test.go index 94793e268..28b6e3797 100644 --- a/agent/structs/structs_oss_test.go +++ b/agent/structs/structs_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package structs diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 29d7692ef..72616fc02 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -1561,7 +1561,7 @@ func TestStructs_ValidateServiceAndNodeMetadata(t *testing.T) { }, "reserved key prefix denied": { map[string]string{ - metaKeyReservedPrefix + "key": "value1", + MetaKeyReservedPrefix + "key": "value1", }, false, "reserved for internal use", @@ -1570,7 +1570,7 @@ func TestStructs_ValidateServiceAndNodeMetadata(t *testing.T) { }, "reserved key prefix allowed": { map[string]string{ - metaKeyReservedPrefix + "key": "value1", + MetaKeyReservedPrefix + "key": "value1", }, true, "", @@ -1640,13 +1640,13 @@ func 
TestStructs_validateMetaPair(t *testing.T) { // key too long {longKey, "value", "Key is too long", false, nil}, // reserved prefix - {metaKeyReservedPrefix + "key", "value", "reserved for internal use", false, nil}, + {MetaKeyReservedPrefix + "key", "value", "reserved for internal use", false, nil}, // reserved prefix, allowed - {metaKeyReservedPrefix + "key", "value", "", true, nil}, + {MetaKeyReservedPrefix + "key", "value", "", true, nil}, // reserved prefix, not allowed via an allowlist - {metaKeyReservedPrefix + "bad", "value", "reserved for internal use", false, map[string]struct{}{metaKeyReservedPrefix + "good": {}}}, + {MetaKeyReservedPrefix + "bad", "value", "reserved for internal use", false, map[string]struct{}{MetaKeyReservedPrefix + "good": {}}}, // reserved prefix, allowed via an allowlist - {metaKeyReservedPrefix + "good", "value", "", true, map[string]struct{}{metaKeyReservedPrefix + "good": {}}}, + {MetaKeyReservedPrefix + "good", "value", "", true, map[string]struct{}{MetaKeyReservedPrefix + "good": {}}}, // value too long {"key", longValue, "Value is too long", false, nil}, } @@ -2449,10 +2449,11 @@ func TestSnapshotRequestResponse_MsgpackEncodeDecode(t *testing.T) { in := &SnapshotResponse{ Error: "blah", QueryMeta: QueryMeta{ - Index: 3, - LastContact: 5 * time.Second, - KnownLeader: true, - ConsistencyLevel: "default", + Index: 3, + LastContact: 5 * time.Second, + KnownLeader: true, + ConsistencyLevel: "default", + ResultsFilteredByACLs: true, }, } TestMsgpackEncodeDecode(t, in, true) diff --git a/agent/structs/system_metadata.go b/agent/structs/system_metadata.go index 7243d5fb0..dd90ba22c 100644 --- a/agent/structs/system_metadata.go +++ b/agent/structs/system_metadata.go @@ -28,6 +28,7 @@ const ( SystemMetadataIntentionFormatKey = "intention-format" SystemMetadataIntentionFormatConfigValue = "config-entry" SystemMetadataIntentionFormatLegacyValue = "legacy" + SystemMetadataVirtualIPsEnabled = "virtual-ips" ) type SystemMetadataEntry struct 
{ diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index a701cbfc6..c9fcf017d 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -130,6 +130,9 @@ func TestNodeServiceMeshGatewayWithAddrs(t testing.T, address string, port int, TaggedAddressLAN: lanAddr, TaggedAddressWAN: wanAddr, }, + RaftIndex: RaftIndex{ + ModifyIndex: 1, + }, } } diff --git a/agent/testagent.go b/agent/testagent.go index b421f3cec..0119a612e 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -165,8 +165,7 @@ func (a *TestAgent) Start(t *testing.T) error { Name: name, }) - portsConfig, returnPortsFn := randomPortsSource(a.UseTLS) - t.Cleanup(returnPortsFn) + portsConfig := randomPortsSource(t, a.UseTLS) // Create NodeID outside the closure, so that it does not change testHCLConfig := TestConfigHCL(NodeID()) @@ -378,8 +377,8 @@ func (a *TestAgent) consulConfig() *consul.Config { // chance of port conflicts for concurrently executed test binaries. // Instead of relying on one set of ports to be sufficient we retry // starting the agent with different ports on port conflict. -func randomPortsSource(tls bool) (data string, returnPortsFn func()) { - ports := freeport.MustTake(7) +func randomPortsSource(t *testing.T, tls bool) string { + ports := freeport.GetN(t, 7) var http, https int if tls { @@ -400,7 +399,7 @@ func randomPortsSource(tls bool) (data string, returnPortsFn func()) { server = ` + strconv.Itoa(ports[5]) + ` grpc = ` + strconv.Itoa(ports[6]) + ` } - `, func() { freeport.Return(ports) } + ` } func NodeID() string { @@ -462,55 +461,62 @@ func TestConfig(logger hclog.Logger, sources ...config.Source) *config.RuntimeCo // with ACLs. 
func TestACLConfig() string { return ` - acl_datacenter = "dc1" - acl_default_policy = "deny" - acl_master_token = "root" - acl_agent_token = "root" - acl_agent_master_token = "towel" + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "root" + agent_recovery = "towel" + } + } ` } const ( - TestDefaultMasterToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a" - TestDefaultAgentMasterToken = "bca580d4-db07-4074-b766-48acc9676955'" + TestDefaultInitialManagementToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a" + TestDefaultAgentRecoveryToken = "bca580d4-db07-4074-b766-48acc9676955'" ) type TestACLConfigParams struct { PrimaryDatacenter string DefaultPolicy string - MasterToken string + InitialManagementToken string AgentToken string DefaultToken string - AgentMasterToken string + AgentRecoveryToken string ReplicationToken string EnableTokenReplication bool } -func DefaulTestACLConfigParams() *TestACLConfigParams { +func DefaultTestACLConfigParams() *TestACLConfigParams { return &TestACLConfigParams{ - PrimaryDatacenter: "dc1", - DefaultPolicy: "deny", - MasterToken: TestDefaultMasterToken, - AgentToken: TestDefaultMasterToken, - AgentMasterToken: TestDefaultAgentMasterToken, + PrimaryDatacenter: "dc1", + DefaultPolicy: "deny", + InitialManagementToken: TestDefaultInitialManagementToken, + AgentToken: TestDefaultInitialManagementToken, + AgentRecoveryToken: TestDefaultAgentRecoveryToken, } } func (p *TestACLConfigParams) HasConfiguredTokens() bool { - return p.MasterToken != "" || + return p.InitialManagementToken != "" || p.AgentToken != "" || p.DefaultToken != "" || - p.AgentMasterToken != "" || + p.AgentRecoveryToken != "" || p.ReplicationToken != "" } func TestACLConfigNew() string { return TestACLConfigWithParams(&TestACLConfigParams{ - PrimaryDatacenter: "dc1", - DefaultPolicy: "deny", - MasterToken: "root", - AgentToken: "root", - AgentMasterToken: "towel", + PrimaryDatacenter: "dc1", + 
DefaultPolicy: "deny", + InitialManagementToken: "root", + AgentToken: "root", + AgentRecoveryToken: "towel", }) } @@ -526,14 +532,14 @@ var aclConfigTpl = template.Must(template.New("ACL Config").Parse(` enable_token_replication = {{printf "%t" .EnableTokenReplication }} {{- if .HasConfiguredTokens}} tokens { - {{- if ne .MasterToken ""}} - master = "{{ .MasterToken }}" + {{- if ne .InitialManagementToken ""}} + initial_management = "{{ .InitialManagementToken }}" {{- end}} {{- if ne .AgentToken ""}} agent = "{{ .AgentToken }}" {{- end}} - {{- if ne .AgentMasterToken "" }} - agent_master = "{{ .AgentMasterToken }}" + {{- if ne .AgentRecoveryToken "" }} + agent_recovery = "{{ .AgentRecoveryToken }}" {{- end}} {{- if ne .DefaultToken "" }} default = "{{ .DefaultToken }}" @@ -551,7 +557,7 @@ func TestACLConfigWithParams(params *TestACLConfigParams) string { cfg := params if params == nil { - cfg = DefaulTestACLConfigParams() + cfg = DefaultTestACLConfigParams() } err := aclConfigTpl.Execute(&buf, &cfg) diff --git a/agent/token/persistence.go b/agent/token/persistence.go index c36b90364..c78e22891 100644 --- a/agent/token/persistence.go +++ b/agent/token/persistence.go @@ -17,12 +17,12 @@ type Logger interface { // Config used by Store.Load, which includes tokens and settings for persistence. 
type Config struct { - EnablePersistence bool - DataDir string - ACLDefaultToken string - ACLAgentToken string - ACLAgentMasterToken string - ACLReplicationToken string + EnablePersistence bool + DataDir string + ACLDefaultToken string + ACLAgentToken string + ACLAgentRecoveryToken string + ACLReplicationToken string EnterpriseConfig } @@ -69,10 +69,10 @@ func (t *Store) WithPersistenceLock(f func() error) error { } type persistedTokens struct { - Replication string `json:"replication,omitempty"` - AgentMaster string `json:"agent_master,omitempty"` - Default string `json:"default,omitempty"` - Agent string `json:"agent,omitempty"` + Replication string `json:"replication,omitempty"` + AgentRecovery string `json:"agent_recovery,omitempty"` + Default string `json:"default,omitempty"` + Agent string `json:"agent,omitempty"` } type fileStore struct { @@ -110,14 +110,14 @@ func loadTokens(s *Store, cfg Config, tokens persistedTokens, logger Logger) { s.UpdateAgentToken(cfg.ACLAgentToken, TokenSourceConfig) } - if tokens.AgentMaster != "" { - s.UpdateAgentMasterToken(tokens.AgentMaster, TokenSourceAPI) + if tokens.AgentRecovery != "" { + s.UpdateAgentRecoveryToken(tokens.AgentRecovery, TokenSourceAPI) - if cfg.ACLAgentMasterToken != "" { - logger.Warn("\"agent_master\" token present in both the configuration and persisted token store, using the persisted token") + if cfg.ACLAgentRecoveryToken != "" { + logger.Warn("\"agent_recovery\" token present in both the configuration and persisted token store, using the persisted token") } } else { - s.UpdateAgentMasterToken(cfg.ACLAgentMasterToken, TokenSourceConfig) + s.UpdateAgentRecoveryToken(cfg.ACLAgentRecoveryToken, TokenSourceConfig) } if tokens.Replication != "" { @@ -134,22 +134,32 @@ func loadTokens(s *Store, cfg Config, tokens persistedTokens, logger Logger) { } func readPersistedFromFile(filename string) (persistedTokens, error) { - tokens := persistedTokens{} + var tokens struct { + persistedTokens + + // Support 
reading tokens persisted by versions <1.11, where agent_master was + // renamed to agent_recovery. + LegacyAgentMaster string `json:"agent_master"` + } buf, err := ioutil.ReadFile(filename) switch { case os.IsNotExist(err): // non-existence is not an error we care about - return tokens, nil + return tokens.persistedTokens, nil case err != nil: - return tokens, fmt.Errorf("failed reading tokens file %q: %w", filename, err) + return tokens.persistedTokens, fmt.Errorf("failed reading tokens file %q: %w", filename, err) } if err := json.Unmarshal(buf, &tokens); err != nil { - return tokens, fmt.Errorf("failed to decode tokens file %q: %w", filename, err) + return tokens.persistedTokens, fmt.Errorf("failed to decode tokens file %q: %w", filename, err) } - return tokens, nil + if tokens.AgentRecovery == "" { + tokens.AgentRecovery = tokens.LegacyAgentMaster + } + + return tokens.persistedTokens, nil } func (p *fileStore) withPersistenceLock(s *Store, f func() error) error { @@ -170,8 +180,8 @@ func (p *fileStore) saveToFile(s *Store) error { tokens.Agent = tok } - if tok, source := s.AgentMasterTokenAndSource(); tok != "" && source == TokenSourceAPI { - tokens.AgentMaster = tok + if tok, source := s.AgentRecoveryTokenAndSource(); tok != "" && source == TokenSourceAPI { + tokens.AgentRecovery = tok } if tok, source := s.ReplicationTokenAndSource(); tok != "" && source == TokenSourceAPI { diff --git a/agent/token/persistence_test.go b/agent/token/persistence_test.go index ec8e7e60e..1bfe971fd 100644 --- a/agent/token/persistence_test.go +++ b/agent/token/persistence_test.go @@ -18,47 +18,47 @@ func TestStore_Load(t *testing.T) { t.Run("with empty store", func(t *testing.T) { cfg := Config{ - DataDir: dataDir, - ACLAgentToken: "alfa", - ACLAgentMasterToken: "bravo", - ACLDefaultToken: "charlie", - ACLReplicationToken: "delta", + DataDir: dataDir, + ACLAgentToken: "alfa", + ACLAgentRecoveryToken: "bravo", + ACLDefaultToken: "charlie", + ACLReplicationToken: "delta", } 
require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "alfa", store.AgentToken()) - require.Equal(t, "bravo", store.AgentMasterToken()) + require.Equal(t, "bravo", store.AgentRecoveryToken()) require.Equal(t, "charlie", store.UserToken()) require.Equal(t, "delta", store.ReplicationToken()) }) t.Run("updated from Config", func(t *testing.T) { cfg := Config{ - DataDir: dataDir, - ACLDefaultToken: "echo", - ACLAgentToken: "foxtrot", - ACLAgentMasterToken: "golf", - ACLReplicationToken: "hotel", + DataDir: dataDir, + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentRecoveryToken: "golf", + ACLReplicationToken: "hotel", } // ensures no error for missing persisted tokens file require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "echo", store.UserToken()) require.Equal(t, "foxtrot", store.AgentToken()) - require.Equal(t, "golf", store.AgentMasterToken()) + require.Equal(t, "golf", store.AgentRecoveryToken()) require.Equal(t, "hotel", store.ReplicationToken()) }) t.Run("with persisted tokens", func(t *testing.T) { cfg := Config{ - DataDir: dataDir, - ACLDefaultToken: "echo", - ACLAgentToken: "foxtrot", - ACLAgentMasterToken: "golf", - ACLReplicationToken: "hotel", + DataDir: dataDir, + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentRecoveryToken: "golf", + ACLReplicationToken: "hotel", } tokens := `{ "agent" : "india", - "agent_master" : "juliett", + "agent_recovery" : "juliett", "default": "kilo", "replication" : "lima" }` @@ -69,14 +69,14 @@ func TestStore_Load(t *testing.T) { // no updates since token persistence is not enabled require.Equal(t, "echo", store.UserToken()) require.Equal(t, "foxtrot", store.AgentToken()) - require.Equal(t, "golf", store.AgentMasterToken()) + require.Equal(t, "golf", store.AgentRecoveryToken()) require.Equal(t, "hotel", store.ReplicationToken()) cfg.EnablePersistence = true require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "india", store.AgentToken()) - require.Equal(t, "juliett", 
store.AgentMasterToken()) + require.Equal(t, "juliett", store.AgentRecoveryToken()) require.Equal(t, "kilo", store.UserToken()) require.Equal(t, "lima", store.ReplicationToken()) @@ -84,28 +84,42 @@ func TestStore_Load(t *testing.T) { require.NotNil(t, store.persistence) }) + t.Run("persisted tokens include pre-1.11 agent_master naming", func(t *testing.T) { + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLAgentRecoveryToken: "golf", + } + + tokens := `{"agent_master": "juliett"}` + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, store.Load(cfg, logger)) + + require.Equal(t, "juliett", store.AgentRecoveryToken()) + }) + t.Run("with persisted tokens, persisted tokens override config", func(t *testing.T) { tokens := `{ "agent" : "mike", - "agent_master" : "november", + "agent_recovery" : "november", "default": "oscar", "replication" : "papa" }` cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "quebec", - ACLAgentToken: "romeo", - ACLAgentMasterToken: "sierra", - ACLReplicationToken: "tango", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "quebec", + ACLAgentToken: "romeo", + ACLAgentRecoveryToken: "sierra", + ACLReplicationToken: "tango", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "mike", store.AgentToken()) - require.Equal(t, "november", store.AgentMasterToken()) + require.Equal(t, "november", store.AgentRecoveryToken()) require.Equal(t, "oscar", store.UserToken()) require.Equal(t, "papa", store.ReplicationToken()) }) @@ -113,35 +127,35 @@ func TestStore_Load(t *testing.T) { t.Run("with some persisted tokens", func(t *testing.T) { tokens := `{ "agent" : "uniform", - "agent_master" : "victor" + "agent_recovery" : "victor" }` cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "whiskey", - ACLAgentToken: "xray", - ACLAgentMasterToken: 
"yankee", - ACLReplicationToken: "zulu", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "whiskey", + ACLAgentToken: "xray", + ACLAgentRecoveryToken: "yankee", + ACLReplicationToken: "zulu", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "uniform", store.AgentToken()) - require.Equal(t, "victor", store.AgentMasterToken()) + require.Equal(t, "victor", store.AgentRecoveryToken()) require.Equal(t, "whiskey", store.UserToken()) require.Equal(t, "zulu", store.ReplicationToken()) }) t.Run("persisted file contains invalid data", func(t *testing.T) { cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "one", - ACLAgentToken: "two", - ACLAgentMasterToken: "three", - ACLReplicationToken: "four", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "one", + ACLAgentToken: "two", + ACLAgentRecoveryToken: "three", + ACLReplicationToken: "four", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) @@ -151,18 +165,18 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "one", store.UserToken()) require.Equal(t, "two", store.AgentToken()) - require.Equal(t, "three", store.AgentMasterToken()) + require.Equal(t, "three", store.AgentRecoveryToken()) require.Equal(t, "four", store.ReplicationToken()) }) t.Run("persisted file contains invalid json", func(t *testing.T) { cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "alfa", - ACLAgentToken: "bravo", - ACLAgentMasterToken: "charlie", - ACLReplicationToken: "foxtrot", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "alfa", + ACLAgentToken: "bravo", + ACLAgentRecoveryToken: "charlie", + ACLReplicationToken: "foxtrot", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte("[1,2,3]"), 0600)) @@ -172,7 +186,7 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "alfa", 
store.UserToken()) require.Equal(t, "bravo", store.AgentToken()) - require.Equal(t, "charlie", store.AgentMasterToken()) + require.Equal(t, "charlie", store.AgentRecoveryToken()) require.Equal(t, "foxtrot", store.ReplicationToken()) }) } @@ -181,12 +195,12 @@ func TestStore_WithPersistenceLock(t *testing.T) { dataDir := testutil.TempDir(t, "datadir") store := new(Store) cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "default-token", - ACLAgentToken: "agent-token", - ACLAgentMasterToken: "master-token", - ACLReplicationToken: "replication-token", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "default-token", + ACLAgentToken: "agent-token", + ACLAgentRecoveryToken: "recovery-token", + ACLReplicationToken: "replication-token", } err := store.Load(cfg, hclog.New(nil)) require.NoError(t, err) @@ -195,7 +209,7 @@ func TestStore_WithPersistenceLock(t *testing.T) { updated := store.UpdateUserToken("the-new-token", TokenSourceAPI) require.True(t, updated) - updated = store.UpdateAgentMasterToken("the-new-master-token", TokenSourceAPI) + updated = store.UpdateAgentRecoveryToken("the-new-recovery-token", TokenSourceAPI) require.True(t, updated) return nil } @@ -206,8 +220,8 @@ func TestStore_WithPersistenceLock(t *testing.T) { tokens, err := readPersistedFromFile(filepath.Join(dataDir, tokensPath)) require.NoError(t, err) expected := persistedTokens{ - Default: "the-new-token", - AgentMaster: "the-new-master-token", + Default: "the-new-token", + AgentRecovery: "the-new-recovery-token", } require.Equal(t, expected, tokens) } diff --git a/agent/token/store.go b/agent/token/store.go index 456190f70..ec2bac38f 100644 --- a/agent/token/store.go +++ b/agent/token/store.go @@ -17,7 +17,7 @@ type TokenKind int const ( TokenKindAgent TokenKind = iota - TokenKindAgentMaster + TokenKindAgentRecovery TokenKindUser TokenKindReplication ) @@ -59,13 +59,13 @@ type Store struct { // agentTokenSource indicates where this token originated 
from agentTokenSource TokenSource - // agentMasterToken is a special token that's only used locally for + // agentRecoveryToken is a special token that's only used locally for // access to the /v1/agent utility operations if the servers aren't // available. - agentMasterToken string + agentRecoveryToken string - // agentMasterTokenSource indicates where this token originated from - agentMasterTokenSource TokenSource + // agentRecoveryTokenSource indicates where this token originated from + agentRecoveryTokenSource TokenSource // replicationToken is a special token that's used by servers to // replicate data from the primary datacenter. @@ -188,15 +188,15 @@ func (t *Store) UpdateAgentToken(token string, source TokenSource) bool { return changed } -// UpdateAgentMasterToken replaces the current agent master token in the store. +// UpdateAgentRecoveryToken replaces the current agent recovery token in the store. // Returns true if it was changed. -func (t *Store) UpdateAgentMasterToken(token string, source TokenSource) bool { +func (t *Store) UpdateAgentRecoveryToken(token string, source TokenSource) bool { t.l.Lock() - changed := t.agentMasterToken != token || t.agentMasterTokenSource != source - t.agentMasterToken = token - t.agentMasterTokenSource = source + changed := t.agentRecoveryToken != token || t.agentRecoveryTokenSource != source + t.agentRecoveryToken = token + t.agentRecoveryTokenSource = source if changed { - t.sendNotificationLocked(TokenKindAgentMaster) + t.sendNotificationLocked(TokenKindAgentRecovery) } t.l.Unlock() return changed @@ -239,11 +239,11 @@ func (t *Store) AgentToken() string { return t.userToken } -func (t *Store) AgentMasterToken() string { +func (t *Store) AgentRecoveryToken() string { t.l.RLock() defer t.l.RUnlock() - return t.agentMasterToken + return t.agentRecoveryToken } // ReplicationToken returns the replication token. 
@@ -270,11 +270,11 @@ func (t *Store) AgentTokenAndSource() (string, TokenSource) { return t.agentToken, t.agentTokenSource } -func (t *Store) AgentMasterTokenAndSource() (string, TokenSource) { +func (t *Store) AgentRecoveryTokenAndSource() (string, TokenSource) { t.l.RLock() defer t.l.RUnlock() - return t.agentMasterToken, t.agentMasterTokenSource + return t.agentRecoveryToken, t.agentRecoveryTokenSource } // ReplicationToken returns the replication token. @@ -285,11 +285,11 @@ func (t *Store) ReplicationTokenAndSource() (string, TokenSource) { return t.replicationToken, t.replicationTokenSource } -// IsAgentMasterToken checks to see if a given token is the agent master token. +// IsAgentRecoveryToken checks to see if a given token is the agent recovery token. // This will never match an empty token for safety. -func (t *Store) IsAgentMasterToken(token string) bool { +func (t *Store) IsAgentRecoveryToken(token string) bool { t.l.RLock() defer t.l.RUnlock() - return (token != "") && (subtle.ConstantTimeCompare([]byte(token), []byte(t.agentMasterToken)) == 1) + return (token != "") && (subtle.ConstantTimeCompare([]byte(token), []byte(t.agentRecoveryToken)) == 1) } diff --git a/agent/token/store_oss.go b/agent/token/store_oss.go index 16123052e..ef8b5336d 100644 --- a/agent/token/store_oss.go +++ b/agent/token/store_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package token diff --git a/agent/token/store_test.go b/agent/token/store_test.go index 6df812257..06b44558d 100644 --- a/agent/token/store_test.go +++ b/agent/token/store_test.go @@ -8,14 +8,14 @@ import ( func TestStore_RegularTokens(t *testing.T) { type tokens struct { - userSource TokenSource - user string - agent string - agentSource TokenSource - master string - masterSource TokenSource - repl string - replSource TokenSource + userSource TokenSource + user string + agent string + agentSource TokenSource + recovery string + recoverySource TokenSource + repl string + replSource 
TokenSource } tests := []struct { @@ -67,22 +67,22 @@ func TestStore_RegularTokens(t *testing.T) { effective: tokens{repl: "R"}, }, { - name: "set master - config", - set: tokens{master: "M", masterSource: TokenSourceConfig}, - raw: tokens{master: "M", masterSource: TokenSourceConfig}, - effective: tokens{master: "M"}, + name: "set recovery - config", + set: tokens{recovery: "M", recoverySource: TokenSourceConfig}, + raw: tokens{recovery: "M", recoverySource: TokenSourceConfig}, + effective: tokens{recovery: "M"}, }, { - name: "set master - api", - set: tokens{master: "M", masterSource: TokenSourceAPI}, - raw: tokens{master: "M", masterSource: TokenSourceAPI}, - effective: tokens{master: "M"}, + name: "set recovery - api", + set: tokens{recovery: "M", recoverySource: TokenSourceAPI}, + raw: tokens{recovery: "M", recoverySource: TokenSourceAPI}, + effective: tokens{recovery: "M"}, }, { name: "set all", - set: tokens{user: "U", agent: "A", repl: "R", master: "M"}, - raw: tokens{user: "U", agent: "A", repl: "R", master: "M"}, - effective: tokens{user: "U", agent: "A", repl: "R", master: "M"}, + set: tokens{user: "U", agent: "A", repl: "R", recovery: "M"}, + raw: tokens{user: "U", agent: "A", repl: "R", recovery: "M"}, + effective: tokens{user: "U", agent: "A", repl: "R", recovery: "M"}, }, } for _, tt := range tests { @@ -100,19 +100,19 @@ func TestStore_RegularTokens(t *testing.T) { require.True(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) } - if tt.set.master != "" { - require.True(t, s.UpdateAgentMasterToken(tt.set.master, tt.set.masterSource)) + if tt.set.recovery != "" { + require.True(t, s.UpdateAgentRecoveryToken(tt.set.recovery, tt.set.recoverySource)) } // If they don't change then they return false. 
require.False(t, s.UpdateUserToken(tt.set.user, tt.set.userSource)) require.False(t, s.UpdateAgentToken(tt.set.agent, tt.set.agentSource)) require.False(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) - require.False(t, s.UpdateAgentMasterToken(tt.set.master, tt.set.masterSource)) + require.False(t, s.UpdateAgentRecoveryToken(tt.set.recovery, tt.set.recoverySource)) require.Equal(t, tt.effective.user, s.UserToken()) require.Equal(t, tt.effective.agent, s.AgentToken()) - require.Equal(t, tt.effective.master, s.AgentMasterToken()) + require.Equal(t, tt.effective.recovery, s.AgentRecoveryToken()) require.Equal(t, tt.effective.repl, s.ReplicationToken()) tok, src := s.UserTokenAndSource() @@ -123,9 +123,9 @@ func TestStore_RegularTokens(t *testing.T) { require.Equal(t, tt.raw.agent, tok) require.Equal(t, tt.raw.agentSource, src) - tok, src = s.AgentMasterTokenAndSource() - require.Equal(t, tt.raw.master, tok) - require.Equal(t, tt.raw.masterSource, src) + tok, src = s.AgentRecoveryTokenAndSource() + require.Equal(t, tt.raw.recovery, tok) + require.Equal(t, tt.raw.recoverySource, src) tok, src = s.ReplicationTokenAndSource() require.Equal(t, tt.raw.repl, tok) @@ -134,27 +134,27 @@ func TestStore_RegularTokens(t *testing.T) { } } -func TestStore_AgentMasterToken(t *testing.T) { +func TestStore_AgentRecoveryToken(t *testing.T) { s := new(Store) verify := func(want bool, toks ...string) { for _, tok := range toks { - require.Equal(t, want, s.IsAgentMasterToken(tok)) + require.Equal(t, want, s.IsAgentRecoveryToken(tok)) } } verify(false, "", "nope") - s.UpdateAgentMasterToken("master", TokenSourceConfig) - verify(true, "master") + s.UpdateAgentRecoveryToken("recovery", TokenSourceConfig) + verify(true, "recovery") verify(false, "", "nope") - s.UpdateAgentMasterToken("another", TokenSourceConfig) + s.UpdateAgentRecoveryToken("another", TokenSourceConfig) verify(true, "another") - verify(false, "", "nope", "master") + verify(false, "", "nope", "recovery") - 
s.UpdateAgentMasterToken("", TokenSourceConfig) - verify(false, "", "nope", "master", "another") + s.UpdateAgentRecoveryToken("", TokenSourceConfig) + verify(false, "", "nope", "recovery", "another") } func TestStore_Notify(t *testing.T) { @@ -180,7 +180,7 @@ func TestStore_Notify(t *testing.T) { agentNotifier := newNotification(t, s, TokenKindAgent) userNotifier := newNotification(t, s, TokenKindUser) - agentMasterNotifier := newNotification(t, s, TokenKindAgentMaster) + agentRecoveryNotifier := newNotification(t, s, TokenKindAgentRecovery) replicationNotifier := newNotification(t, s, TokenKindReplication) replicationNotifier2 := newNotification(t, s, TokenKindReplication) @@ -193,7 +193,7 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotifiedOnce(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) // now update the agent token which should send notificaitons to the agent and all notifier @@ -202,16 +202,16 @@ func TestStore_Notify(t *testing.T) { requireNotifiedOnce(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) - // now update the agent master token which should send notificaitons to the agent master and all notifier - require.True(t, s.UpdateAgentMasterToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) + // now update the agent recovery token which should send notificaitons to the agent recovery and all notifier + require.True(t, s.UpdateAgentRecoveryToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - 
requireNotifiedOnce(t, agentMasterNotifier.Ch) + requireNotifiedOnce(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) // now update the replication token which should send notificaitons to the replication and all notifier @@ -220,7 +220,7 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotifiedOnce(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotifiedOnce(t, replicationNotifier2.Ch) s.StopNotify(replicationNotifier2) @@ -231,12 +231,12 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotifiedOnce(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) // request updates but that are not changes require.False(t, s.UpdateAgentToken("5d748ec2-d536-461f-8e2a-1f7eae98d559", TokenSourceAPI)) - require.False(t, s.UpdateAgentMasterToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) + require.False(t, s.UpdateAgentRecoveryToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) require.False(t, s.UpdateUserToken("47788919-f944-476a-bda5-446d64be1df8", TokenSourceAPI)) require.False(t, s.UpdateReplicationToken("eb0b56b9-fa65-4ae1-902a-c64457c62ac6", TokenSourceAPI)) @@ -244,5 +244,5 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) } diff --git a/agent/txn_endpoint_test.go b/agent/txn_endpoint_test.go index b3770fb16..1d6d3f01b 100644 --- a/agent/txn_endpoint_test.go +++ b/agent/txn_endpoint_test.go @@ -495,7 +495,7 @@ func TestTxnEndpoint_KV_Actions(t *testing.T) { if 
resp.Code != 409 { t.Fatalf("expected 409, got %d", resp.Code) } - if !bytes.Contains(resp.Body.Bytes(), []byte("failed session lookup")) { + if !bytes.Contains(resp.Body.Bytes(), []byte("invalid session")) { t.Fatalf("bad: %s", resp.Body.String()) } }) diff --git a/agent/ui_endpoint_oss_test.go b/agent/ui_endpoint_oss_test.go index fc2f01814..2022c32c6 100644 --- a/agent/ui_endpoint_oss_test.go +++ b/agent/ui_endpoint_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package agent diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index 57c014995..e8521670c 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -123,13 +123,56 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C continue } - endpoints, ok := cfgSnap.MeshGateway.GatewayGroups[key.String()] - if !ok { - endpoints, ok = cfgSnap.MeshGateway.FedStateGateways[key.String()] - if !ok { // not possible - s.Logger.Error("skipping mesh gateway endpoints because no definition found", "datacenter", key) - continue + // Mesh gateways in remote DCs are discovered in two ways: + // + // 1. Via an Internal.ServiceDump RPC in the remote DC (GatewayGroups). + // 2. In the federation state that is replicated from the primary DC (FedStateGateways). + // + // We determine which set to use based on whichever contains the highest + // raft ModifyIndex (and is therefore most up-to-date). + // + // Previously, GatewayGroups was always given presedence over FedStateGateways + // but this was problematic when using mesh gateways for WAN federation. + // + // Consider the following example: + // + // - Primary and Secondary DCs are WAN Federated via local mesh gateways. + // + // - Secondary DC's mesh gateway is running on an ephemeral compute instance + // and is abruptly terminated and rescheduled with a *new IP address*. 
+ // + // - Primary DC's mesh gateway is no longer able to connect to the Secondary + // DC as its proxy is configured with the old IP address. Therefore any RPC + // from the Primary to the Secondary DC will fail (including the one to + // discover the gateway's new IP address). + // + // - Secondary DC performs its regular anti-entropy of federation state data + // to the Primary DC (this succeeds as there is still connectivity in this + // direction). + // + // - At this point the Primary DC's mesh gateway should observe the new IP + // address and reconfigure its proxy, however as we always prioritised + // GatewayGroups this didn't happen and the connection remained severed. + maxModifyIndex := func(vals structs.CheckServiceNodes) uint64 { + var max uint64 + for _, v := range vals { + if i := v.Service.RaftIndex.ModifyIndex; i > max { + max = i + } } + return max + } + + endpoints := cfgSnap.MeshGateway.GatewayGroups[key.String()] + fedStateEndpoints := cfgSnap.MeshGateway.FedStateGateways[key.String()] + + if maxModifyIndex(fedStateEndpoints) > maxModifyIndex(endpoints) { + endpoints = fedStateEndpoints + } + + if len(endpoints) == 0 { + s.Logger.Error("skipping mesh gateway endpoints because no definition found", "datacenter", key) + continue } { // standard connect diff --git a/agent/xds/endpoints_test.go b/agent/xds/endpoints_test.go index e9a982be0..a9f9f0da9 100644 --- a/agent/xds/endpoints_test.go +++ b/agent/xds/endpoints_test.go @@ -245,6 +245,14 @@ func TestEndpointsFromSnapshot(t *testing.T) { create: proxycfg.TestConfigSnapshotMeshGatewayUsingFederationStates, setup: nil, }, + { + name: "mesh-gateway-newer-information-in-federation-states", + create: proxycfg.TestConfigSnapshotMeshGatewayNewerInformationInFederationStates, + }, + { + name: "mesh-gateway-older-information-in-federation-states", + create: proxycfg.TestConfigSnapshotMeshGatewayOlderInformationInFederationStates, + }, { name: "mesh-gateway-no-services", create: 
proxycfg.TestConfigSnapshotMeshGatewayNoServices, diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 276ea58f7..4cb85aea0 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -107,17 +107,31 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. continue } + // RDS, Envoy's Route Discovery Service, is only used for HTTP services with a customized discovery chain. + useRDS := chain.Protocol != "tcp" && !chain.IsDefault() + + var clusterName string + if !useRDS { + // When not using RDS we must generate a cluster name to attach to the filter chain. + // With RDS, cluster names get attached to the dynamic routes instead. + target, err := simpleChainTarget(chain) + if err != nil { + return nil, err + } + clusterName = CustomizeClusterName(target.Name, chain) + } + + filterName := fmt.Sprintf("%s.%s.%s.%s", chain.ServiceName, chain.Namespace, chain.Partition, chain.Datacenter) + // Generate the upstream listeners for when they are explicitly set with a local bind port or socket path - if outboundListener == nil || (upstreamCfg != nil && upstreamCfg.HasLocalPortOrSocket()) { - filterChain, err := s.makeUpstreamFilterChainForDiscoveryChain( - id, - "", - cfg.Protocol, - upstreamCfg, - chain, - cfgSnap, - nil, - ) + if upstreamCfg != nil && upstreamCfg.HasLocalPortOrSocket() { + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + routeName: id, + clusterName: clusterName, + filterName: filterName, + protocol: cfg.Protocol, + useRDS: useRDS, + }) if err != nil { return nil, err } @@ -135,15 +149,14 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. // The rest of this loop is used exclusively for transparent proxies. // Below we create a filter chain per upstream, rather than a listener per upstream // as we do for explicit upstreams above. 
- filterChain, err := s.makeUpstreamFilterChainForDiscoveryChain( - id, - "", - cfg.Protocol, - upstreamCfg, - chain, - cfgSnap, - nil, - ) + + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + routeName: id, + clusterName: clusterName, + filterName: filterName, + protocol: cfg.Protocol, + useRDS: useRDS, + }) if err != nil { return nil, err } @@ -155,13 +168,22 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. // We do not match on all endpoints here since it would lead to load balancing across // all instances when any instance address is dialed. for _, e := range endpoints { - if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" { + if vip := e.Service.TaggedAddresses[structs.TaggedAddressVirtualIP]; vip.Address != "" { uniqueAddrs[vip.Address] = struct{}{} } + + // The virtualIPTag is used by consul-k8s to store the ClusterIP for a service. + // We only match on this virtual IP if the upstream is in the proxy's partition. + // This is because the IP is not guaranteed to be unique across k8s clusters. + if structs.EqualPartitions(e.Node.PartitionOrDefault(), cfgSnap.ProxyID.PartitionOrDefault()) { + if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" { + uniqueAddrs[vip.Address] = struct{}{} + } + } } - if len(uniqueAddrs) > 1 { - s.Logger.Warn("detected multiple virtual IPs for an upstream, all will be used to match traffic", - "upstream", id) + if len(uniqueAddrs) > 2 { + s.Logger.Debug("detected multiple virtual IPs for an upstream, all will be used to match traffic", + "upstream", id, "ip_count", len(uniqueAddrs)) } // For every potential address we collected, create the appropriate address prefix to match on. @@ -188,17 +210,13 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. 
DestinationPartition: sn.PartitionOrDefault(), } - filterChain, err := s.makeUpstreamFilterChainForDiscoveryChain( - "", - "passthrough~"+passthrough.SNI, + filterName := fmt.Sprintf("%s.%s.%s.%s", u.DestinationName, u.DestinationNamespace, u.DestinationPartition, cfgSnap.Datacenter) - // TODO(tproxy) This should use the protocol configured on the upstream's config entry - "tcp", - &u, - nil, - cfgSnap, - nil, - ) + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + clusterName: "passthrough~" + passthrough.SNI, + filterName: filterName, + protocol: "tcp", + }) if err != nil { return nil, err } @@ -219,15 +237,11 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. if cfgSnap.ConnectProxy.MeshConfig == nil || !cfgSnap.ConnectProxy.MeshConfig.TransparentProxy.MeshDestinationsOnly { - filterChain, err := s.makeUpstreamFilterChainForDiscoveryChain( - "", - OriginalDestinationClusterName, - "tcp", - nil, - nil, - cfgSnap, - nil, - ) + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + clusterName: OriginalDestinationClusterName, + filterName: OriginalDestinationClusterName, + protocol: "tcp", + }) if err != nil { return nil, err } @@ -268,15 +282,13 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. 
upstreamListener := makeListener(id, u, envoy_core_v3.TrafficDirection_OUTBOUND) - filterChain, err := s.makeUpstreamFilterChainForDiscoveryChain( - id, - "", - cfg.Protocol, - u, - nil, - cfgSnap, - nil, - ) + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + // TODO (SNI partition) add partition for upstream SNI + clusterName: connect.UpstreamSNI(u, "", cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain), + filterName: id, + routeName: id, + protocol: cfg.Protocol, + }) if err != nil { return nil, err } @@ -1212,112 +1224,32 @@ func (s *ResourceGenerator) makeMeshGatewayListener(name, addr string, port int, return l, nil } -func (s *ResourceGenerator) makeUpstreamFilterChainForDiscoveryChain( - id string, - overrideCluster string, - protocol string, - u *structs.Upstream, - chain *structs.CompiledDiscoveryChain, - cfgSnap *proxycfg.ConfigSnapshot, - tlsContext *envoy_tls_v3.DownstreamTlsContext, -) (*envoy_listener_v3.FilterChain, error) { - // TODO (freddy) Make this actually legible - useRDS := true +type filterChainOpts struct { + routeName string + clusterName string + filterName string + protocol string + useRDS bool + tlsContext *envoy_tls_v3.DownstreamTlsContext +} - var ( - clusterName string - destination, datacenter, partition, namespace string - ) - - // TODO (SNI partition) add partition for SNI - if chain != nil { - destination, datacenter, partition, namespace = chain.ServiceName, chain.Datacenter, chain.Partition, chain.Namespace - } - if (chain == nil || chain.IsDefault()) && u != nil { - useRDS = false - - if datacenter == "" { - datacenter = u.Datacenter - } - if datacenter == "" { - datacenter = cfgSnap.Datacenter - } - if destination == "" { - destination = u.DestinationName - } - if partition == "" { - partition = u.DestinationPartition - } - if namespace == "" { - namespace = u.DestinationNamespace - } - - sni := connect.UpstreamSNI(u, "", datacenter, cfgSnap.Roots.TrustDomain) - clusterName = CustomizeClusterName(sni, chain) - - } 
else { - if protocol == "tcp" && chain != nil { - useRDS = false - - startNode := chain.Nodes[chain.StartNode] - if startNode == nil { - return nil, fmt.Errorf("missing first node in compiled discovery chain for: %s", chain.ServiceName) - } - if startNode.Type != structs.DiscoveryGraphNodeTypeResolver { - return nil, fmt.Errorf("unexpected first node in discovery chain using protocol=%q: %s", protocol, startNode.Type) - } - targetID := startNode.Resolver.Target - target := chain.Targets[targetID] - - clusterName = CustomizeClusterName(target.Name, chain) - } - } - - // Default the namespace to match how SNIs are generated - if namespace == "" { - namespace = structs.IntentionDefaultNamespace - } - - // Default the partition to match how SNIs are generated - if partition == "" { - partition = structs.IntentionDefaultNamespace - } - - filterName := fmt.Sprintf("%s.%s.%s.%s", destination, namespace, partition, datacenter) - if u != nil && u.DestinationType == structs.UpstreamDestTypePreparedQuery { - // Avoid encoding dc and namespace for prepared queries. - // Those are defined in the query itself and are not available here. 
- filterName = id - } - if overrideCluster != "" { - useRDS = false - clusterName = overrideCluster - - if destination == "" { - filterName = overrideCluster - } - } - - opts := listenerFilterOpts{ - useRDS: useRDS, - protocol: protocol, - filterName: filterName, - routeName: id, - cluster: clusterName, - statPrefix: "upstream.", - routePath: "", - ingressGateway: false, - httpAuthzFilter: nil, - } - filter, err := makeListenerFilter(opts) - if err != nil { - return nil, err - } - transportSocket, err := makeDownstreamTLSTransportSocket(tlsContext) +func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) { + filter, err := makeListenerFilter(listenerFilterOpts{ + useRDS: opts.useRDS, + protocol: opts.protocol, + filterName: opts.filterName, + routeName: opts.routeName, + cluster: opts.clusterName, + statPrefix: "upstream.", + }) if err != nil { return nil, err } + transportSocket, err := makeDownstreamTLSTransportSocket(opts.tlsContext) + if err != nil { + return nil, err + } return &envoy_listener_v3.FilterChain{ Filters: []*envoy_listener_v3.Filter{ filter, @@ -1326,111 +1258,19 @@ func (s *ResourceGenerator) makeUpstreamFilterChainForDiscoveryChain( }, nil } -// TODO(freddy) Replace in favor of new function above. Currently in use for ingress gateways. -func (s *ResourceGenerator) makeUpstreamListenerForDiscoveryChain( - u *structs.Upstream, - address string, - chain *structs.CompiledDiscoveryChain, - cfgSnap *proxycfg.ConfigSnapshot, - tlsContext *envoy_tls_v3.DownstreamTlsContext, -) (proto.Message, error) { - - // Best understanding is this only makes sense for port listeners.... - if u.LocalBindSocketPath != "" { - return nil, fmt.Errorf("makeUpstreamListenerForDiscoveryChain not supported for unix domain sockets %s %+v", - address, u) +// simpleChainTarget returns the discovery target for a chain with a single node. 
+// A chain can have a single target if it is for a TCP service or an HTTP service without +// multiple splits/routes/failovers. +func simpleChainTarget(chain *structs.CompiledDiscoveryChain) (*structs.DiscoveryTarget, error) { + startNode := chain.Nodes[chain.StartNode] + if startNode == nil { + return nil, fmt.Errorf("missing first node in compiled discovery chain for: %s", chain.ServiceName) } - - upstreamID := u.Identifier() - l := makePortListenerWithDefault(upstreamID, address, u.LocalBindPort, envoy_core_v3.TrafficDirection_OUTBOUND) - cfg := s.getAndModifyUpstreamConfigForListener(upstreamID, u, chain) - if cfg.EnvoyListenerJSON != "" { - return makeListenerFromUserConfig(cfg.EnvoyListenerJSON) + if startNode.Type != structs.DiscoveryGraphNodeTypeResolver { + return nil, fmt.Errorf("expected discovery chain with single node, found unexpected start node: %s", startNode.Type) } - - useRDS := true - var ( - clusterName string - destination, datacenter, partition, namespace string - ) - if chain == nil || chain.IsDefault() { - useRDS = false - - dc := u.Datacenter - if dc == "" { - dc = cfgSnap.Datacenter - } - destination, datacenter, partition, namespace = u.DestinationName, dc, u.DestinationPartition, u.DestinationNamespace - - sni := connect.UpstreamSNI(u, "", dc, cfgSnap.Roots.TrustDomain) - clusterName = CustomizeClusterName(sni, chain) - - } else { - destination, datacenter, partition, namespace = chain.ServiceName, chain.Datacenter, chain.Partition, chain.Namespace - - if cfg.Protocol == "tcp" { - useRDS = false - - startNode := chain.Nodes[chain.StartNode] - if startNode == nil { - return nil, fmt.Errorf("missing first node in compiled discovery chain for: %s", chain.ServiceName) - } - if startNode.Type != structs.DiscoveryGraphNodeTypeResolver { - return nil, fmt.Errorf("unexpected first node in discovery chain using protocol=%q: %s", cfg.Protocol, startNode.Type) - } - targetID := startNode.Resolver.Target - target := chain.Targets[targetID] - - 
clusterName = CustomizeClusterName(target.Name, chain) - } - } - - // Default the namespace to match how SNIs are generated - if namespace == "" { - namespace = structs.IntentionDefaultNamespace - } - - // Default the partition to match how SNIs are generated - if partition == "" { - partition = structs.IntentionDefaultNamespace - } - filterName := fmt.Sprintf("%s.%s.%s.%s", destination, namespace, partition, datacenter) - - if u.DestinationType == structs.UpstreamDestTypePreparedQuery { - // Avoid encoding dc and namespace for prepared queries. - // Those are defined in the query itself and are not available here. - filterName = upstreamID - } - - opts := listenerFilterOpts{ - useRDS: useRDS, - protocol: cfg.Protocol, - filterName: filterName, - routeName: upstreamID, - cluster: clusterName, - statPrefix: "upstream.", - routePath: "", - httpAuthzFilter: nil, - } - filter, err := makeListenerFilter(opts) - if err != nil { - return nil, err - } - - transportSocket, err := makeDownstreamTLSTransportSocket(tlsContext) - if err != nil { - return nil, err - } - - l.FilterChains = []*envoy_listener_v3.FilterChain{ - { - Filters: []*envoy_listener_v3.Filter{ - filter, - }, - TransportSocket: transportSocket, - }, - } - return l, nil + targetID := startNode.Resolver.Target + return chain.Targets[targetID], nil } func (s *ResourceGenerator) getAndModifyUpstreamConfigForListener(id string, u *structs.Upstream, chain *structs.CompiledDiscoveryChain) structs.UpstreamConfig { diff --git a/agent/xds/listeners_ingress.go b/agent/xds/listeners_ingress.go index 5a494fe99..7668bd02e 100644 --- a/agent/xds/listeners_ingress.go +++ b/agent/xds/listeners_ingress.go @@ -2,7 +2,6 @@ package xds import ( "fmt" - envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" @@ -54,19 +53,47 @@ func 
(s *ResourceGenerator) makeIngressGatewayListeners(address string, cfgSnap id := u.Identifier() chain := cfgSnap.IngressGateway.DiscoveryChain[id] + if chain == nil { + // Wait until a chain is present in the snapshot. + continue + } - var upstreamListener proto.Message - upstreamListener, err := s.makeUpstreamListenerForDiscoveryChain( - &u, - address, - chain, - cfgSnap, - tlsContext, - ) + cfg := s.getAndModifyUpstreamConfigForListener(id, &u, chain) + + // RDS, Envoy's Route Discovery Service, is only used for HTTP services with a customized discovery chain. + // TODO(freddy): Why can the protocol of the listener be overridden here? + useRDS := cfg.Protocol != "tcp" && !chain.IsDefault() + + var clusterName string + if !useRDS { + // When not using RDS we must generate a cluster name to attach to the filter chain. + // With RDS, cluster names get attached to the dynamic routes instead. + target, err := simpleChainTarget(chain) + if err != nil { + return nil, err + } + clusterName = CustomizeClusterName(target.Name, chain) + } + + filterName := fmt.Sprintf("%s.%s.%s.%s", chain.ServiceName, chain.Namespace, chain.Partition, chain.Datacenter) + + l := makePortListenerWithDefault(id, address, u.LocalBindPort, envoy_core_v3.TrafficDirection_OUTBOUND) + filterChain, err := s.makeUpstreamFilterChain(filterChainOpts{ + routeName: id, + useRDS: useRDS, + clusterName: clusterName, + filterName: filterName, + protocol: cfg.Protocol, + tlsContext: tlsContext, + }) if err != nil { return nil, err } - resources = append(resources, upstreamListener) + l.FilterChains = []*envoy_listener_v3.FilterChain{ + filterChain, + } + resources = append(resources, l) + } else { // If multiple upstreams share this port, make a special listener for the protocol. 
listener := makePortListener(listenerKey.Protocol, address, listenerKey.Port, envoy_core_v3.TrafficDirection_OUTBOUND) diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index aa7bf512a..acf197961 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -259,6 +259,23 @@ func TestListenersFromSnapshot(t *testing.T) { create: proxycfg.TestConfigSnapshotDiscoveryChainWithFailoverThroughLocalGateway, setup: nil, }, + { + name: "connect-proxy-upstream-defaults", + create: proxycfg.TestConfigSnapshot, + setup: func(snap *proxycfg.ConfigSnapshot) { + for _, v := range snap.ConnectProxy.UpstreamConfig { + // Prepared queries do not get centrally configured upstream defaults merged into them. + if v.DestinationType == structs.UpstreamDestTypePreparedQuery { + continue + } + // Represent upstream config as if it came from centrally configured upstream defaults. + // The name/namespace must not make it onto the cluster name attached to the outbound listener. 
+ v.CentrallyConfigured = true + v.DestinationNamespace = structs.WildcardSpecifier + v.DestinationName = structs.WildcardSpecifier + } + }, + }, { name: "expose-paths-local-app-paths", create: proxycfg.TestConfigSnapshotExposeConfig, @@ -567,7 +584,7 @@ func TestListenersFromSnapshot(t *testing.T) { snap.IngressGateway.Upstreams = map[proxycfg.IngressListenerKey]structs.Upstreams{ {Protocol: "tcp", Port: 8080}: { { - DestinationName: "foo", + DestinationName: "db", LocalBindPort: 8080, }, }, @@ -640,6 +657,30 @@ func TestListenersFromSnapshot(t *testing.T) { }, }, } + + // Every ingress upstream has an associated discovery chain in the snapshot + secureChain := discoverychain.TestCompileConfigEntries( + t, + "secure", + "default", + "default", + "dc1", + connect.TestClusterID+".consul", + nil, + ) + snap.IngressGateway.DiscoveryChain["secure"] = secureChain + + insecureChain := discoverychain.TestCompileConfigEntries( + t, + "insecure", + "default", + "default", + "dc1", + connect.TestClusterID+".consul", + nil, + ) + snap.IngressGateway.DiscoveryChain["insecure"] = insecureChain + snap.IngressGateway.Listeners = map[proxycfg.IngressListenerKey]structs.IngressListener{ {Protocol: "tcp", Port: 8080}: { Port: 8080, @@ -822,7 +863,8 @@ func TestListenersFromSnapshot(t *testing.T) { Address: "9.9.9.9", Port: 9090, TaggedAddresses: map[string]structs.ServiceAddress{ - "virtual": {Address: "10.0.0.1"}, + "virtual": {Address: "10.0.0.1"}, + structs.TaggedAddressVirtualIP: {Address: "240.0.0.1"}, }, }, }, diff --git a/agent/xds/net_fallback.go b/agent/xds/net_fallback.go index 08857991b..a09fc1498 100644 --- a/agent/xds/net_fallback.go +++ b/agent/xds/net_fallback.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package xds diff --git a/agent/xds/net_linux.go b/agent/xds/net_linux.go index 743f756cb..1be5d80c3 100644 --- a/agent/xds/net_linux.go +++ b/agent/xds/net_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package xds diff --git 
a/agent/xds/server_oss.go b/agent/xds/server_oss.go index ef5adcec1..a9a01908d 100644 --- a/agent/xds/server_oss.go +++ b/agent/xds/server_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package xds diff --git a/agent/xds/testdata/endpoints/mesh-gateway-newer-information-in-federation-states.envoy-1-20-x.golden b/agent/xds/testdata/endpoints/mesh-gateway-newer-information-in-federation-states.envoy-1-20-x.golden new file mode 100644 index 000000000..8c3d619ca --- /dev/null +++ b/agent/xds/testdata/endpoints/mesh-gateway-newer-information-in-federation-states.envoy-1-20-x.golden @@ -0,0 +1,133 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "bar.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.6", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.7", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.8", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "dc2.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "198.18.1.3", + "portValue": 443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "foo.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", 
+ "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.3", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.4", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.5", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.9", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/mesh-gateway-older-information-in-federation-states.envoy-1-20-x.golden b/agent/xds/testdata/endpoints/mesh-gateway-older-information-in-federation-states.envoy-1-20-x.golden new file mode 100644 index 000000000..81f2a3eec --- /dev/null +++ b/agent/xds/testdata/endpoints/mesh-gateway-older-information-in-federation-states.envoy-1-20-x.golden @@ -0,0 +1,145 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "bar.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.6", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.7", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": 
"172.16.1.8", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "dc2.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "198.18.1.1", + "portValue": 443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "198.18.1.2", + "portValue": 443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "foo.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.3", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.4", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.5", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "172.16.1.9", + "portValue": 2222 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-upstream-defaults.envoy-1-20-x.golden b/agent/xds/testdata/listeners/connect-proxy-upstream-defaults.envoy-1-20-x.golden new file mode 100644 index 000000000..57d50f71c 
--- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-upstream-defaults.envoy-1-20-x.golden @@ -0,0 +1,119 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": { + + }, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": 
"public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/ingress-with-sds-listener-listener-level.envoy-1-20-x.golden b/agent/xds/testdata/listeners/ingress-with-sds-listener-listener-level.envoy-1-20-x.golden index 8e223cf3e..aabf3d6f9 100644 --- a/agent/xds/testdata/listeners/ingress-with-sds-listener-listener-level.envoy-1-20-x.golden +++ b/agent/xds/testdata/listeners/ingress-with-sds-listener-listener-level.envoy-1-20-x.golden @@ -3,7 +3,7 @@ "resources": [ { "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", - "name": "foo:1.2.3.4:8080", + "name": "db:1.2.3.4:8080", "address": { "socketAddress": { "address": "1.2.3.4", @@ -17,8 +17,8 @@ "name": "envoy.filters.network.tcp_proxy", "typedConfig": { "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", - "statPrefix": "upstream.foo.default.default.dc1", - "cluster": 
"foo.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" } } ], diff --git a/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden b/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden index 6c6691c61..d390e3d9f 100644 --- a/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden +++ b/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden @@ -42,6 +42,10 @@ { "addressPrefix": "10.0.0.1", "prefixLen": 32 + }, + { + "addressPrefix": "240.0.0.1", + "prefixLen": 32 } ] }, diff --git a/api/agent.go b/api/agent.go index c4efa0efe..e3b5d362a 100644 --- a/api/agent.go +++ b/api/agent.go @@ -1021,25 +1021,36 @@ func (a *Agent) Leave() error { return nil } +type ForceLeaveOpts struct { + // Prune indicates if we should remove a failed agent from the list of + // members in addition to ejecting it. + Prune bool + + // WAN indicates that the request should exclusively target the WAN pool. + WAN bool +} + // ForceLeave is used to have the agent eject a failed node func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := a.c.doRequest(r) - if err != nil { - return err - } - defer closeResponseBody(resp) - if err := requireOK(resp); err != nil { - return err - } - return nil + return a.ForceLeaveOpts(node, ForceLeaveOpts{}) } // ForceLeavePrune is used to have an a failed agent removed // from the list of members func (a *Agent) ForceLeavePrune(node string) error { + return a.ForceLeaveOpts(node, ForceLeaveOpts{Prune: true}) +} + +// ForceLeaveOpts is used to have the agent eject a failed node or remove it +// completely from the list of members. 
+func (a *Agent) ForceLeaveOpts(node string, opts ForceLeaveOpts) error { r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - r.params.Set("prune", "1") + if opts.Prune { + r.params.Set("prune", "1") + } + if opts.WAN { + r.params.Set("wan", "1") + } _, resp, err := a.c.doRequest(r) if err != nil { return err @@ -1287,25 +1298,33 @@ func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*Write // UpdateDefaultACLToken updates the agent's "default" token. See updateToken // for more details func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback("default", "acl_token", token, q) + return a.updateTokenFallback(token, q, "default", "acl_token") } // UpdateAgentACLToken updates the agent's "agent" token. See updateToken // for more details func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback("agent", "acl_agent_token", token, q) + return a.updateTokenFallback(token, q, "agent", "acl_agent_token") +} + +// UpdateAgentRecoveryACLToken updates the agent's "agent_recovery" token. See updateToken +// for more details. +func (a *Agent) UpdateAgentRecoveryACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback(token, q, "agent_recovery", "agent_master", "acl_agent_master_token") } // UpdateAgentMasterACLToken updates the agent's "agent_master" token. See updateToken -// for more details +// for more details. +// +// DEPRECATED - Prefer UpdateAgentRecoveryACLToken for v1.11 and above. func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q) + return a.updateTokenFallback(token, q, "agent_master", "acl_agent_master_token") } // UpdateReplicationACLToken updates the agent's "replication" token. 
See updateToken // for more details func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateTokenFallback("replication", "acl_replication_token", token, q) + return a.updateTokenFallback(token, q, "replication", "acl_replication_token") } // updateToken can be used to update one of an agent's ACL tokens after the agent has @@ -1316,10 +1335,21 @@ func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, return meta, err } -func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) { - meta, status, err := a.updateTokenOnce(target, token, q) - if err != nil && status == 404 { - meta, _, err = a.updateTokenOnce(fallback, token, q) +func (a *Agent) updateTokenFallback(token string, q *WriteOptions, targets ...string) (*WriteMeta, error) { + if len(targets) == 0 { + panic("targets must not be empty") + } + + var ( + meta *WriteMeta + err error + ) + for _, target := range targets { + var status int + meta, status, err = a.updateTokenOnce(target, token, q) + if err == nil && status != http.StatusNotFound { + return meta, err + } } return meta, err } diff --git a/api/agent_test.go b/api/agent_test.go index f87509251..16fc066ff 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1518,6 +1518,10 @@ func TestAPI_AgentUpdateToken(t *testing.T) { t.Fatalf("err: %v", err) } + if _, err := agent.UpdateAgentRecoveryACLToken("root", nil); err != nil { + t.Fatalf("err: %v", err) + } + if _, err := agent.UpdateReplicationACLToken("root", nil); err != nil { t.Fatalf("err: %v", err) } @@ -1570,6 +1574,9 @@ func TestAPI_AgentUpdateToken(t *testing.T) { _, err = agent.UpdateAgentMasterACLToken("root", nil) require.NoError(t, err) + _, err = agent.UpdateAgentRecoveryACLToken("root", nil) + require.NoError(t, err) + _, err = agent.UpdateReplicationACLToken("root", nil) require.NoError(t, err) }) diff --git a/api/api.go b/api/api.go index 47830f25e..d97f1879f 
100644 --- a/api/api.go +++ b/api/api.go @@ -281,6 +281,11 @@ type QueryMeta struct { // defined policy. This can be "allow" which means ACLs are used to // deny-list, or "deny" which means ACLs are allow-lists. DefaultACLPolicy string + + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. + ResultsFilteredByACLs bool } // WriteMeta is used to return meta data about a write @@ -1071,6 +1076,14 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error { q.DefaultACLPolicy = v } + // Parse the X-Consul-Results-Filtered-By-ACLs + switch header.Get("X-Consul-Results-Filtered-By-ACLs") { + case "true": + q.ResultsFilteredByACLs = true + default: + q.ResultsFilteredByACLs = false + } + // Parse Cache info if cacheStr := header.Get("X-Cache"); cacheStr != "" { q.CacheHit = strings.EqualFold(cacheStr, "HIT") diff --git a/api/api_test.go b/api/api_test.go index 89852d373..01dc0b681 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -932,6 +932,7 @@ func TestAPI_ParseQueryMeta(t *testing.T) { resp.Header.Set("X-Consul-KnownLeader", "true") resp.Header.Set("X-Consul-Translate-Addresses", "true") resp.Header.Set("X-Consul-Default-ACL-Policy", "deny") + resp.Header.Set("X-Consul-Results-Filtered-By-ACLs", "true") qm := &QueryMeta{} if err := parseQueryMeta(resp, qm); err != nil { @@ -953,6 +954,9 @@ func TestAPI_ParseQueryMeta(t *testing.T) { if qm.DefaultACLPolicy != "deny" { t.Fatalf("Bad: %v", qm) } + if !qm.ResultsFilteredByACLs { + t.Fatalf("Bad: %v", qm) + } } func TestAPI_UnixSocket(t *testing.T) { diff --git a/api/config_entry.go b/api/config_entry.go index f5fbbbce4..91c407bb5 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -22,7 +22,7 @@ const ( TerminatingGateway string = "terminating-gateway" ServiceIntentions string = "service-intentions" MeshConfig string = "mesh" - 
PartitionExports string = "partition-exports" + ExportedServices string = "exported-services" ProxyConfigGlobal string = "global" MeshConfigMesh string = "mesh" @@ -277,8 +277,8 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil case MeshConfig: return &MeshConfigEntry{}, nil - case PartitionExports: - return &PartitionExportsConfigEntry{Name: name}, nil + case ExportedServices: + return &ExportedServicesConfigEntry{Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } diff --git a/api/config_entry_discoverychain.go b/api/config_entry_discoverychain.go index b9c599485..dfb2bcc10 100644 --- a/api/config_entry_discoverychain.go +++ b/api/config_entry_discoverychain.go @@ -63,10 +63,10 @@ type ServiceRouteHTTPMatchQueryParam struct { } type ServiceRouteDestination struct { - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"` RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` NumRetries uint32 `json:",omitempty" alias:"num_retries"` @@ -134,11 +134,11 @@ func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.Crea func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } type ServiceSplit struct { - Weight float32 - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. 
+ Weight float32 + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` } @@ -216,9 +216,9 @@ type ServiceResolverSubset struct { type ServiceResolverRedirect struct { Service string `json:",omitempty"` ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. - Namespace string `json:",omitempty"` - Datacenter string `json:",omitempty"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` + Datacenter string `json:",omitempty"` } type ServiceResolverFailover struct { diff --git a/api/config_entry_discoverychain_test.go b/api/config_entry_discoverychain_test.go index 357f1ff5c..b56372a26 100644 --- a/api/config_entry_discoverychain_test.go +++ b/api/config_entry_discoverychain_test.go @@ -242,6 +242,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { Service: "test-failover", ServiceSubset: "v2", Namespace: defaultNamespace, + Partition: defaultPartition, PrefixRewrite: "/", RequestTimeout: 5 * time.Second, NumRetries: 5, diff --git a/api/config_entry_exports.go b/api/config_entry_exports.go index 0b6650107..ae9cb2ff6 100644 --- a/api/config_entry_exports.go +++ b/api/config_entry_exports.go @@ -2,14 +2,14 @@ package api import "encoding/json" -// PartitionExportsConfigEntry manages the exported services for a single admin partition. +// ExportedServicesConfigEntry manages the exported services for a single admin partition. // Admin Partitions are a Consul Enterprise feature. -type PartitionExportsConfigEntry struct { - // Name is the name of the partition the PartitionExportsConfigEntry applies to. 
+type ExportedServicesConfigEntry struct { + // Name is the name of the partition the ExportedServicesConfigEntry applies to. // Partitioning is a Consul Enterprise feature. Name string `json:",omitempty"` - // Partition is the partition where the PartitionExportsConfigEntry is stored. + // Partition is the partition where the ExportedServicesConfigEntry is stored. // If the partition does not match the name, the name will overwrite the partition. // Partitioning is a Consul Enterprise feature. Partition string `json:",omitempty"` @@ -49,23 +49,23 @@ type ServiceConsumer struct { Partition string } -func (e *PartitionExportsConfigEntry) GetKind() string { return PartitionExports } -func (e *PartitionExportsConfigEntry) GetName() string { return e.Name } -func (e *PartitionExportsConfigEntry) GetPartition() string { return e.Name } -func (e *PartitionExportsConfigEntry) GetNamespace() string { return IntentionDefaultNamespace } -func (e *PartitionExportsConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *PartitionExportsConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *PartitionExportsConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } +func (e *ExportedServicesConfigEntry) GetName() string { return e.Name } +func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name } +func (e *ExportedServicesConfigEntry) GetNamespace() string { return IntentionDefaultNamespace } +func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } // MarshalJSON adds the Kind field so that the JSON can be decoded back into the // correct type. 
-func (e *PartitionExportsConfigEntry) MarshalJSON() ([]byte, error) { - type Alias PartitionExportsConfigEntry +func (e *ExportedServicesConfigEntry) MarshalJSON() ([]byte, error) { + type Alias ExportedServicesConfigEntry source := &struct { Kind string *Alias }{ - Kind: PartitionExports, + Kind: ExportedServices, Alias: (*Alias)(e), } return json.Marshal(source) diff --git a/api/config_entry_gateways.go b/api/config_entry_gateways.go index c3eb07f12..0792ad824 100644 --- a/api/config_entry_gateways.go +++ b/api/config_entry_gateways.go @@ -103,12 +103,14 @@ type IngressService struct { // using a "tcp" listener. Hosts []string - // Referencing other partitions is not supported. - // Namespace is the namespace where the service is located. // Namespacing is a Consul Enterprise feature. Namespace string `json:",omitempty"` + // Partition is the partition where the service is located. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + // TLS allows specifying some TLS configuration per listener. 
TLS *GatewayServiceTLSConfig `json:",omitempty"` diff --git a/api/config_entry_gateways_test.go b/api/config_entry_gateways_test.go index bc8d6d0e4..f80a4d74e 100644 --- a/api/config_entry_gateways_test.go +++ b/api/config_entry_gateways_test.go @@ -157,8 +157,9 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) { require.Len(t, readIngress.Listeners, 1) require.Len(t, readIngress.Listeners[0].Services, 1) - // Set namespace to blank so that OSS and ent can utilize the same tests + // Set namespace and partition to blank so that OSS and ent can utilize the same tests readIngress.Listeners[0].Services[0].Namespace = "" + readIngress.Listeners[0].Services[0].Partition = "" require.Equal(t, ingress1.Listeners, readIngress.Listeners) case "bar": @@ -168,8 +169,9 @@ func TestAPI_ConfigEntries_IngressGateway(t *testing.T) { require.Equal(t, ingress2.Name, readIngress.Name) require.Len(t, readIngress.Listeners, 1) require.Len(t, readIngress.Listeners[0].Services, 1) - // Set namespace to blank so that OSS and ent can utilize the same tests + // Set namespace and partition to blank so that OSS and ent can utilize the same tests readIngress.Listeners[0].Services[0].Namespace = "" + readIngress.Listeners[0].Services[0].Partition = "" require.Equal(t, ingress2.Listeners, readIngress.Listeners) } diff --git a/api/config_entry_test.go b/api/config_entry_test.go index 699d4ddc7..04419ea37 100644 --- a/api/config_entry_test.go +++ b/api/config_entry_test.go @@ -964,7 +964,8 @@ func TestDecodeConfigEntry(t *testing.T) { "Services": [ { "Name": "web", - "Namespace": "foo" + "Namespace": "foo", + "Partition": "bar" }, { "Name": "db" @@ -1001,6 +1002,7 @@ func TestDecodeConfigEntry(t *testing.T) { { Name: "web", Namespace: "foo", + Partition: "bar", }, { Name: "db", diff --git a/api/connect_intention.go b/api/connect_intention.go index 34dc69b89..734d4ab0f 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -30,6 +30,11 @@ type Intention struct { SourceNS, 
SourceName string DestinationNS, DestinationName string + // SourcePartition and DestinationPartition cannot be wildcards "*" and + // are not compatible with legacy intentions. + SourcePartition string `json:",omitempty"` + DestinationPartition string `json:",omitempty"` + // SourceType is the type of the value for the source. SourceType IntentionSourceType @@ -363,8 +368,8 @@ func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, * func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { r := c.c.newRequest("PUT", "/v1/connect/intentions/exact") r.setWriteOptions(q) - r.params.Set("source", maybePrefixNamespace(ixn.SourceNS, ixn.SourceName)) - r.params.Set("destination", maybePrefixNamespace(ixn.DestinationNS, ixn.DestinationName)) + r.params.Set("source", maybePrefixNamespaceAndPartition(ixn.SourcePartition, ixn.SourceNS, ixn.SourceName)) + r.params.Set("destination", maybePrefixNamespaceAndPartition(ixn.DestinationPartition, ixn.DestinationNS, ixn.DestinationName)) r.obj = ixn rtt, resp, err := c.c.doRequest(r) if err != nil { @@ -380,11 +385,17 @@ func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, return wm, nil } -func maybePrefixNamespace(ns, name string) string { - if ns == "" { +func maybePrefixNamespaceAndPartition(part, ns, name string) string { + switch { + case part == "" && ns == "": return name + case part == "" && ns != "": + return ns + "/" + name + case part != "" && ns == "": + return part + "/" + IntentionDefaultNamespace + "/" + name + default: + return part + "/" + ns + "/" + name } - return ns + "/" + name } // IntentionCreate will create a new intention. 
The ID in the given diff --git a/api/connect_intention_test.go b/api/connect_intention_test.go index e854c1fad..232ce344c 100644 --- a/api/connect_intention_test.go +++ b/api/connect_intention_test.go @@ -33,6 +33,8 @@ func TestAPI_ConnectIntentionCreateListGetUpdateDelete(t *testing.T) { ixn.UpdatedAt = actual.UpdatedAt ixn.CreateIndex = actual.CreateIndex ixn.ModifyIndex = actual.ModifyIndex + ixn.SourcePartition = actual.SourcePartition + ixn.DestinationPartition = actual.DestinationPartition ixn.Hash = actual.Hash require.Equal(t, ixn, actual) diff --git a/api/go.mod b/api/go.mod index 348ad8a73..6a37c10dd 100644 --- a/api/go.mod +++ b/api/go.mod @@ -10,7 +10,7 @@ require ( github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-uuid v1.0.1 - github.com/hashicorp/serf v0.9.5 + github.com/hashicorp/serf v0.9.6 github.com/mitchellh/mapstructure v1.1.2 github.com/stretchr/testify v1.4.0 ) diff --git a/api/go.sum b/api/go.sum index b95bd4744..16706f53f 100644 --- a/api/go.sum +++ b/api/go.sum @@ -36,11 +36,11 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist 
v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -56,9 +56,9 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -83,20 +83,18 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -105,10 +103,16 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 h1:Bli41pIlzTzf3KEY06n+xnzK/BESIg2ze4Pgfh/aI8c= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/api/kv.go b/api/kv.go index 47dfa6694..85a9d7750 100644 --- a/api/kv.go +++ b/api/kv.go @@ -44,6 +44,10 @@ type KVPair struct { // Namespace is the namespace the KVPair is associated with // Namespacing is a Consul Enterprise feature. Namespace string `json:",omitempty"` + + // Partition is the partition the KVPair is associated with + // Admin Partition is a Consul Enterprise feature. + Partition string `json:",omitempty"` } // KVPairs is a list of KVPair objects diff --git a/api/namespace_test.go b/api/namespace_test.go index f8edd992d..68bbde5a8 100644 --- a/api/namespace_test.go +++ b/api/namespace_test.go @@ -1,3 +1,4 @@ +//go:build consulent // +build consulent package api diff --git a/api/operator_keyring.go b/api/operator_keyring.go index 15f319bb1..6db31a252 100644 --- a/api/operator_keyring.go +++ b/api/operator_keyring.go @@ -16,6 +16,9 @@ type KeyringResponse struct { // Segment has the network segment this request corresponds to. Segment string + // Partition has the admin partition this request corresponds to. + Partition string `json:",omitempty"` + // Messages has information or errors from serf Messages map[string]string `json:",omitempty"` diff --git a/api/oss_test.go b/api/oss_test.go index f1e25d9f9..7006385f7 100644 --- a/api/oss_test.go +++ b/api/oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package api diff --git a/api/partition.go b/api/partition.go index 2b6bed8e5..88edfb7b0 100644 --- a/api/partition.go +++ b/api/partition.go @@ -6,13 +6,13 @@ import ( "time" ) -// AdminPartition is the configuration of a single admin partition. Admin Partitions are a Consul Enterprise feature. -type AdminPartition struct { +// Partition is the configuration of a single admin partition. 
Admin Partitions are a Consul Enterprise feature. +type Partition struct { // Name is the name of the Partition. Name string `json:"Name"` // Description is where the user puts any information they want - // about the partition. It is not used internally. + // about the admin partition. It is not used internally. Description string `json:"Description,omitempty"` // DeletedAt is the time when the Partition was marked for deletion @@ -26,11 +26,10 @@ type AdminPartition struct { ModifyIndex uint64 `json:"ModifyIndex,omitempty"` } -type AdminPartitions struct { - Partitions []*AdminPartition -} +// PartitionDefaultName is the default partition value. +const PartitionDefaultName = "default" -// Partitions can be used to manage Partitions in Consul Enterprise.. +// Partitions can be used to manage Partitions in Consul Enterprise. type Partitions struct { c *Client } @@ -40,7 +39,7 @@ func (c *Client) Partitions() *Partitions { return &Partitions{c} } -func (p *Partitions) Create(ctx context.Context, partition *AdminPartition, q *WriteOptions) (*AdminPartition, *WriteMeta, error) { +func (p *Partitions) Create(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { if partition.Name == "" { return nil, nil, fmt.Errorf("Must specify a Name for Partition creation") } @@ -59,7 +58,7 @@ func (p *Partitions) Create(ctx context.Context, partition *AdminPartition, q *W } wm := &WriteMeta{RequestTime: rtt} - var out AdminPartition + var out Partition if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -67,7 +66,7 @@ func (p *Partitions) Create(ctx context.Context, partition *AdminPartition, q *W return &out, wm, nil } -func (p *Partitions) Update(ctx context.Context, partition *AdminPartition, q *WriteOptions) (*AdminPartition, *WriteMeta, error) { +func (p *Partitions) Update(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { if partition.Name == "" { return nil, nil, 
fmt.Errorf("Must specify a Name for Partition updating") } @@ -86,7 +85,7 @@ func (p *Partitions) Update(ctx context.Context, partition *AdminPartition, q *W } wm := &WriteMeta{RequestTime: rtt} - var out AdminPartition + var out Partition if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -94,8 +93,8 @@ func (p *Partitions) Update(ctx context.Context, partition *AdminPartition, q *W return &out, wm, nil } -func (p *Partitions) Read(ctx context.Context, name string, q *QueryOptions) (*AdminPartition, *QueryMeta, error) { - var out AdminPartition +func (p *Partitions) Read(ctx context.Context, name string, q *QueryOptions) (*Partition, *QueryMeta, error) { + var out Partition r := p.c.newRequest("GET", "/v1/partition/"+name) r.setQueryOptions(q) r.ctx = ctx @@ -140,8 +139,8 @@ func (p *Partitions) Delete(ctx context.Context, name string, q *WriteOptions) ( return wm, nil } -func (p *Partitions) List(ctx context.Context, q *QueryOptions) (*AdminPartitions, *QueryMeta, error) { - var out *AdminPartitions +func (p *Partitions) List(ctx context.Context, q *QueryOptions) ([]*Partition, *QueryMeta, error) { + var out []*Partition r := p.c.newRequest("GET", "/v1/partitions") r.setQueryOptions(q) r.ctx = ctx diff --git a/api/txn_test.go b/api/txn_test.go index 3d69baff1..e418062e0 100644 --- a/api/txn_test.go +++ b/api/txn_test.go @@ -153,6 +153,7 @@ func TestAPI_ClientTxn(t *testing.T) { CreateIndex: ret.Results[0].KV.CreateIndex, ModifyIndex: ret.Results[0].KV.ModifyIndex, Namespace: ret.Results[0].KV.Namespace, + Partition: defaultPartition, }, }, &TxnResult{ @@ -164,6 +165,7 @@ func TestAPI_ClientTxn(t *testing.T) { CreateIndex: ret.Results[1].KV.CreateIndex, ModifyIndex: ret.Results[1].KV.ModifyIndex, Namespace: ret.Results[0].KV.Namespace, + Partition: defaultPartition, }, }, &TxnResult{ @@ -264,6 +266,7 @@ func TestAPI_ClientTxn(t *testing.T) { CreateIndex: ret.Results[0].KV.CreateIndex, ModifyIndex: ret.Results[0].KV.ModifyIndex, Namespace: 
ret.Results[0].KV.Namespace, + Partition: defaultPartition, }, }, &TxnResult{ diff --git a/command/acl/agenttokens/agent_tokens.go b/command/acl/agenttokens/agent_tokens.go index 914efb2c2..974c8e613 100644 --- a/command/acl/agenttokens/agent_tokens.go +++ b/command/acl/agenttokens/agent_tokens.go @@ -54,8 +54,8 @@ func (c *cmd) Run(args []string) int { _, err = client.Agent().UpdateDefaultACLToken(token, nil) case "agent": _, err = client.Agent().UpdateAgentACLToken(token, nil) - case "master": - _, err = client.Agent().UpdateAgentMasterACLToken(token, nil) + case "recovery": + _, err = client.Agent().UpdateAgentRecoveryACLToken(token, nil) case "replication": _, err = client.Agent().UpdateReplicationACLToken(token, nil) default: @@ -78,7 +78,7 @@ func (c *cmd) dataFromArgs(args []string) (string, string, error) { return "", "", fmt.Errorf("Missing TYPE and TOKEN arguments") case 1: switch args[0] { - case "default", "agent", "master", "replication": + case "default", "agent", "recovery", "replication": return "", "", fmt.Errorf("Missing TOKEN argument") default: return "", "", fmt.Errorf("MISSING TYPE argument") @@ -121,7 +121,7 @@ Usage: consul acl set-agent-token [options] TYPE TOKEN agent The token that the agent will use for internal agent operations. If not given then the default token is used for these operations. - master This sets the token that can be used to access the Agent APIs in + recovery This sets the token that can be used to access the Agent APIs in the event that the ACL datacenter cannot be reached. 
replication This is the token that the agent will use for replication diff --git a/command/acl/agenttokens/agent_tokens_test.go b/command/acl/agenttokens/agent_tokens_test.go index e5d20d047..5e6430a9b 100644 --- a/command/acl/agenttokens/agent_tokens_test.go +++ b/command/acl/agenttokens/agent_tokens_test.go @@ -33,7 +33,7 @@ func TestAgentTokensCommand(t *testing.T) { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -78,11 +78,11 @@ func TestAgentTokensCommand(t *testing.T) { assert.Empty(ui.ErrorWriter.String()) } - // master token + // recovery token { args := []string{ "-http-addr=" + a.HTTPAddr(), - "master", + "recovery", token.SecretID, } diff --git a/command/acl/authmethod/create/authmethod_create_oss.go b/command/acl/authmethod/create/authmethod_create_oss.go index 7fc34f29a..0df5e8efd 100644 --- a/command/acl/authmethod/create/authmethod_create_oss.go +++ b/command/acl/authmethod/create/authmethod_create_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package authmethodcreate diff --git a/command/acl/authmethod/create/authmethod_create_test.go b/command/acl/authmethod/create/authmethod_create_test.go index 26158c22a..0e45e624d 100644 --- a/command/acl/authmethod/create/authmethod_create_test.go +++ b/command/acl/authmethod/create/authmethod_create_test.go @@ -43,7 +43,7 @@ func TestAuthMethodCreateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -197,7 +197,7 @@ func TestAuthMethodCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -368,7 +368,7 @@ func TestAuthMethodCreateCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -514,7 +514,7 @@ func TestAuthMethodCreateCommand_config(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git 
a/command/acl/authmethod/delete/authmethod_delete_test.go b/command/acl/authmethod/delete/authmethod_delete_test.go index b112ab174..61ee7b169 100644 --- a/command/acl/authmethod/delete/authmethod_delete_test.go +++ b/command/acl/authmethod/delete/authmethod_delete_test.go @@ -36,7 +36,7 @@ func TestAuthMethodDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/list/authmethod_list_test.go b/command/acl/authmethod/list/authmethod_list_test.go index 4925f1f11..edf963183 100644 --- a/command/acl/authmethod/list/authmethod_list_test.go +++ b/command/acl/authmethod/list/authmethod_list_test.go @@ -37,7 +37,7 @@ func TestAuthMethodListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -118,7 +118,7 @@ func TestAuthMethodListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/read/authmethod_read_test.go b/command/acl/authmethod/read/authmethod_read_test.go index 1cc0f7771..0b4f04c0e 100644 --- a/command/acl/authmethod/read/authmethod_read_test.go +++ b/command/acl/authmethod/read/authmethod_read_test.go @@ -37,7 +37,7 @@ func TestAuthMethodReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -127,7 +127,7 @@ func TestAuthMethodReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/update/authmethod_update_oss.go b/command/acl/authmethod/update/authmethod_update_oss.go index 240904c4e..e72289ab3 100644 --- a/command/acl/authmethod/update/authmethod_update_oss.go +++ b/command/acl/authmethod/update/authmethod_update_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package authmethodupdate diff --git 
a/command/acl/authmethod/update/authmethod_update_test.go b/command/acl/authmethod/update/authmethod_update_test.go index 9b9125085..8ebde83cd 100644 --- a/command/acl/authmethod/update/authmethod_update_test.go +++ b/command/acl/authmethod/update/authmethod_update_test.go @@ -42,7 +42,7 @@ func TestAuthMethodUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -179,7 +179,7 @@ func TestAuthMethodUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -270,7 +270,7 @@ func TestAuthMethodUpdateCommand_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -374,7 +374,7 @@ func TestAuthMethodUpdateCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -612,7 +612,7 @@ func TestAuthMethodUpdateCommand_k8s_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -802,7 +802,7 @@ func TestAuthMethodUpdateCommand_config(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/create/bindingrule_create_test.go b/command/acl/bindingrule/create/bindingrule_create_test.go index 6a48096e6..60744954b 100644 --- a/command/acl/bindingrule/create/bindingrule_create_test.go +++ b/command/acl/bindingrule/create/bindingrule_create_test.go @@ -36,7 +36,7 @@ func TestBindingRuleCreateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -187,7 +187,7 @@ func TestBindingRuleCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/delete/bindingrule_delete_test.go b/command/acl/bindingrule/delete/bindingrule_delete_test.go index d2ce8d6ee..1c05cd610 100644 --- 
a/command/acl/bindingrule/delete/bindingrule_delete_test.go +++ b/command/acl/bindingrule/delete/bindingrule_delete_test.go @@ -35,7 +35,7 @@ func TestBindingRuleDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/list/bindingrule_list_test.go b/command/acl/bindingrule/list/bindingrule_list_test.go index fedf2cda9..55d75cfa3 100644 --- a/command/acl/bindingrule/list/bindingrule_list_test.go +++ b/command/acl/bindingrule/list/bindingrule_list_test.go @@ -37,7 +37,7 @@ func TestBindingRuleListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/read/bindingrule_read_test.go b/command/acl/bindingrule/read/bindingrule_read_test.go index eeab59ff9..fcb55785d 100644 --- a/command/acl/bindingrule/read/bindingrule_read_test.go +++ b/command/acl/bindingrule/read/bindingrule_read_test.go @@ -36,7 +36,7 @@ func TestBindingRuleReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/update/bindingrule_update_test.go b/command/acl/bindingrule/update/bindingrule_update_test.go index 3be2f162b..1ec873630 100644 --- a/command/acl/bindingrule/update/bindingrule_update_test.go +++ b/command/acl/bindingrule/update/bindingrule_update_test.go @@ -37,7 +37,7 @@ func TestBindingRuleUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -478,7 +478,7 @@ func TestBindingRuleUpdateCommand_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/create/policy_create_test.go b/command/acl/policy/create/policy_create_test.go index 50fa4d569..8632ca228 100644 --- a/command/acl/policy/create/policy_create_test.go +++ b/command/acl/policy/create/policy_create_test.go @@ -37,7 
+37,7 @@ func TestPolicyCreateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -78,7 +78,7 @@ func TestPolicyCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/delete/policy_delete_test.go b/command/acl/policy/delete/policy_delete_test.go index 1548bb7d1..3057d8000 100644 --- a/command/acl/policy/delete/policy_delete_test.go +++ b/command/acl/policy/delete/policy_delete_test.go @@ -33,7 +33,7 @@ func TestPolicyDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/list/policy_list_test.go b/command/acl/policy/list/policy_list_test.go index 4784a46cb..208c7a82e 100644 --- a/command/acl/policy/list/policy_list_test.go +++ b/command/acl/policy/list/policy_list_test.go @@ -34,7 +34,7 @@ func TestPolicyListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -89,7 +89,7 @@ func TestPolicyListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/read/policy_read_test.go b/command/acl/policy/read/policy_read_test.go index b36528719..34d35e177 100644 --- a/command/acl/policy/read/policy_read_test.go +++ b/command/acl/policy/read/policy_read_test.go @@ -34,7 +34,7 @@ func TestPolicyReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -98,7 +98,7 @@ func TestPolicyReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/update/policy_update_test.go b/command/acl/policy/update/policy_update_test.go index a2f57e865..164eba699 100644 --- a/command/acl/policy/update/policy_update_test.go +++ 
b/command/acl/policy/update/policy_update_test.go @@ -37,7 +37,7 @@ func TestPolicyUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -88,7 +88,7 @@ func TestPolicyUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/create/role_create_test.go b/command/acl/role/create/role_create_test.go index fb9decc9e..b7a31add4 100644 --- a/command/acl/role/create/role_create_test.go +++ b/command/acl/role/create/role_create_test.go @@ -33,7 +33,7 @@ func TestRoleCreateCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -126,7 +126,7 @@ func TestRoleCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/delete/role_delete_test.go b/command/acl/role/delete/role_delete_test.go index 22e7182db..a1b941cf6 100644 --- a/command/acl/role/delete/role_delete_test.go +++ b/command/acl/role/delete/role_delete_test.go @@ -32,7 +32,7 @@ func TestRoleDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/list/role_list_test.go b/command/acl/role/list/role_list_test.go index 5de61a10b..60803c8da 100644 --- a/command/acl/role/list/role_list_test.go +++ b/command/acl/role/list/role_list_test.go @@ -35,7 +35,7 @@ func TestRoleListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -93,7 +93,7 @@ func TestRoleListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/read/role_read_test.go b/command/acl/role/read/role_read_test.go index 20946c60e..8751d4b44 100644 --- a/command/acl/role/read/role_read_test.go +++ 
b/command/acl/role/read/role_read_test.go @@ -35,7 +35,7 @@ func TestRoleReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -203,7 +203,7 @@ func TestRoleReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/update/role_update_test.go b/command/acl/role/update/role_update_test.go index e3b8ecfa5..ebc49945b 100644 --- a/command/acl/role/update/role_update_test.go +++ b/command/acl/role/update/role_update_test.go @@ -35,7 +35,7 @@ func TestRoleUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -209,7 +209,7 @@ func TestRoleUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -292,7 +292,7 @@ func TestRoleUpdateCommand_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/rules/translate_test.go b/command/acl/rules/translate_test.go index 73fb34693..830cb2403 100644 --- a/command/acl/rules/translate_test.go +++ b/command/acl/rules/translate_test.go @@ -35,7 +35,7 @@ func TestRulesTranslateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/clone/token_clone_test.go b/command/acl/token/clone/token_clone_test.go index a198597f7..4d5c86b37 100644 --- a/command/acl/token/clone/token_clone_test.go +++ b/command/acl/token/clone/token_clone_test.go @@ -70,7 +70,7 @@ func TestTokenCloneCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -178,7 +178,7 @@ func TestTokenCloneCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/create/token_create_test.go 
b/command/acl/token/create/token_create_test.go index e35866cb4..f3174988f 100644 --- a/command/acl/token/create/token_create_test.go +++ b/command/acl/token/create/token_create_test.go @@ -33,7 +33,7 @@ func TestTokenCreateCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -131,7 +131,7 @@ func TestTokenCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/delete/token_delete_test.go b/command/acl/token/delete/token_delete_test.go index 4aca85830..36a1d521c 100644 --- a/command/acl/token/delete/token_delete_test.go +++ b/command/acl/token/delete/token_delete_test.go @@ -33,7 +33,7 @@ func TestTokenDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/list/token_list_test.go b/command/acl/token/list/token_list_test.go index 21ceffa1a..ba6d3949c 100644 --- a/command/acl/token/list/token_list_test.go +++ b/command/acl/token/list/token_list_test.go @@ -35,7 +35,7 @@ func TestTokenListCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -90,7 +90,7 @@ func TestTokenListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/read/token_read_test.go b/command/acl/token/read/token_read_test.go index dd41a736e..c74c5eec9 100644 --- a/command/acl/token/read/token_read_test.go +++ b/command/acl/token/read/token_read_test.go @@ -35,7 +35,7 @@ func TestTokenReadCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -83,7 +83,7 @@ func TestTokenReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/update/token_update_test.go 
b/command/acl/token/update/token_update_test.go index 0e3547e1e..924e6052c 100644 --- a/command/acl/token/update/token_update_test.go +++ b/command/acl/token/update/token_update_test.go @@ -34,7 +34,7 @@ func TestTokenUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -166,7 +166,7 @@ func TestTokenUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/catalog/helpers_oss.go b/command/catalog/helpers_oss.go index 1469bfd3c..bc70b3358 100644 --- a/command/catalog/helpers_oss.go +++ b/command/catalog/helpers_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package catalog diff --git a/command/config/write/config_write_test.go b/command/config/write/config_write_test.go index 5e145b51f..0ff5152c9 100644 --- a/command/config/write/config_write_test.go +++ b/command/config/write/config_write_test.go @@ -791,6 +791,7 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-router" name = "main" + partition = "pepper" meta { "foo" = "bar" "gir" = "zim" @@ -830,12 +831,13 @@ func TestParseConfigEntry(t *testing.T) { } } destination { - service = "carrot" - service_subset = "kale" - namespace = "leek" - prefix_rewrite = "/alternate" - request_timeout = "99s" - num_retries = 12345 + service = "carrot" + service_subset = "kale" + namespace = "leek" + partition = "chard" + prefix_rewrite = "/alternate" + request_timeout = "99s" + num_retries = 12345 retry_on_connect_failure = true retry_on_status_codes = [401, 209] } @@ -874,6 +876,7 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "service-router" Name = "main" + Partition = "pepper" Meta { "foo" = "bar" "gir" = "zim" @@ -916,6 +919,7 @@ func TestParseConfigEntry(t *testing.T) { Service = "carrot" ServiceSubset = "kale" Namespace = "leek" + Partition = "chard" PrefixRewrite = "/alternate" RequestTimeout = "99s" NumRetries = 12345 @@ 
-958,6 +962,7 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-router", "name": "main", + "partition": "pepper", "meta" : { "foo": "bar", "gir": "zim" @@ -1000,6 +1005,7 @@ func TestParseConfigEntry(t *testing.T) { "service": "carrot", "service_subset": "kale", "namespace": "leek", + "partition": "chard", "prefix_rewrite": "/alternate", "request_timeout": "99s", "num_retries": 12345, @@ -1049,6 +1055,7 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-router", "Name": "main", + "Partition": "pepper", "Meta" : { "foo": "bar", "gir": "zim" @@ -1091,6 +1098,7 @@ func TestParseConfigEntry(t *testing.T) { "Service": "carrot", "ServiceSubset": "kale", "Namespace": "leek", + "Partition": "chard", "PrefixRewrite": "/alternate", "RequestTimeout": "99s", "NumRetries": 12345, @@ -1137,8 +1145,9 @@ func TestParseConfigEntry(t *testing.T) { } `, expect: &api.ServiceRouterConfigEntry{ - Kind: "service-router", - Name: "main", + Kind: "service-router", + Name: "main", + Partition: "pepper", Meta: map[string]string{ "foo": "bar", "gir": "zim", @@ -1181,6 +1190,7 @@ func TestParseConfigEntry(t *testing.T) { Service: "carrot", ServiceSubset: "kale", Namespace: "leek", + Partition: "chard", PrefixRewrite: "/alternate", RequestTimeout: 99 * time.Second, NumRetries: 12345, @@ -1225,6 +1235,7 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-splitter" name = "main" + partition = "east" meta { "foo" = "bar" "gir" = "zim" @@ -1242,12 +1253,14 @@ func TestParseConfigEntry(t *testing.T) { weight = 0.9 service = "other" namespace = "alt" + partition = "west" }, ] `, camel: ` Kind = "service-splitter" Name = "main" + Partition = "east" Meta { "foo" = "bar" "gir" = "zim" @@ -1265,6 +1278,7 @@ func TestParseConfigEntry(t *testing.T) { Weight = 0.9 Service = "other" Namespace = "alt" + Partition = "west" }, ] `, @@ -1272,6 +1286,7 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-splitter", "name": "main", + "partition": "east", 
"meta" : { "foo": "bar", "gir": "zim" @@ -1288,7 +1303,8 @@ func TestParseConfigEntry(t *testing.T) { { "weight": 0.9, "service": "other", - "namespace": "alt" + "namespace": "alt", + "partition": "west" } ] } @@ -1297,6 +1313,7 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-splitter", "Name": "main", + "Partition": "east", "Meta" : { "foo": "bar", "gir": "zim" @@ -1313,14 +1330,16 @@ func TestParseConfigEntry(t *testing.T) { { "Weight": 0.9, "Service": "other", - "Namespace": "alt" + "Namespace": "alt", + "Partition": "west" } ] } `, expect: &api.ServiceSplitterConfigEntry{ - Kind: api.ServiceSplitter, - Name: "main", + Kind: api.ServiceSplitter, + Name: "main", + Partition: "east", Meta: map[string]string{ "foo": "bar", "gir": "zim", @@ -1338,6 +1357,7 @@ func TestParseConfigEntry(t *testing.T) { Weight: 0.9, Service: "other", Namespace: "alt", + Partition: "west", }, }, }, @@ -1512,20 +1532,24 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-resolver" name = "main" + partition = "east" redirect { service = "other" service_subset = "backup" namespace = "alt" + partition = "west" datacenter = "dc9" } `, camel: ` Kind = "service-resolver" Name = "main" + Partition = "east" Redirect { Service = "other" ServiceSubset = "backup" Namespace = "alt" + Partition = "west" Datacenter = "dc9" } `, @@ -1533,10 +1557,12 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-resolver", "name": "main", + "partition": "east", "redirect": { "service": "other", "service_subset": "backup", "namespace": "alt", + "partition": "west", "datacenter": "dc9" } } @@ -1545,21 +1571,25 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-resolver", "Name": "main", + "Partition": "east", "Redirect": { "Service": "other", "ServiceSubset": "backup", "Namespace": "alt", + "Partition": "west", "Datacenter": "dc9" } } `, expect: &api.ServiceResolverConfigEntry{ - Kind: "service-resolver", - Name: "main", + Kind: "service-resolver", + Name: 
"main", + Partition: "east", Redirect: &api.ServiceResolverRedirect{ Service: "other", ServiceSubset: "backup", Namespace: "alt", + Partition: "west", Datacenter: "dc9", }, }, @@ -2722,9 +2752,9 @@ func TestParseConfigEntry(t *testing.T) { }, }, { - name: "partition-exports", + name: "exported-services", snake: ` - kind = "partition-exports" + kind = "exported-services" name = "foo" meta { "foo" = "bar" @@ -2755,7 +2785,7 @@ func TestParseConfigEntry(t *testing.T) { ] `, camel: ` - Kind = "partition-exports" + Kind = "exported-services" Name = "foo" Meta { "foo" = "bar" @@ -2787,7 +2817,7 @@ func TestParseConfigEntry(t *testing.T) { `, snakeJSON: ` { - "kind": "partition-exports", + "kind": "exported-services", "name": "foo", "meta": { "foo": "bar", @@ -2820,7 +2850,7 @@ func TestParseConfigEntry(t *testing.T) { `, camelJSON: ` { - "Kind": "partition-exports", + "Kind": "exported-services", "Name": "foo", "Meta": { "foo": "bar", @@ -2851,7 +2881,7 @@ func TestParseConfigEntry(t *testing.T) { ] } `, - expect: &api.PartitionExportsConfigEntry{ + expect: &api.ExportedServicesConfigEntry{ Name: "foo", Meta: map[string]string{ "foo": "bar", diff --git a/command/connect/envoy/envoy_oss_test.go b/command/connect/envoy/envoy_oss_test.go index 3c519cd46..6bcd5581d 100644 --- a/command/connect/envoy/envoy_oss_test.go +++ b/command/connect/envoy/envoy_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package envoy diff --git a/command/connect/envoy/exec_test.go b/command/connect/envoy/exec_test.go index 5ee29d147..e381a488b 100644 --- a/command/connect/envoy/exec_test.go +++ b/command/connect/envoy/exec_test.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package envoy diff --git a/command/connect/envoy/exec_unix.go b/command/connect/envoy/exec_unix.go index e64a0098a..9ab83eecf 100644 --- a/command/connect/envoy/exec_unix.go +++ b/command/connect/envoy/exec_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin 
package envoy diff --git a/command/connect/envoy/exec_unsupported.go b/command/connect/envoy/exec_unsupported.go index 92771b443..d22b4c8cd 100644 --- a/command/connect/envoy/exec_unsupported.go +++ b/command/connect/envoy/exec_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package envoy diff --git a/command/connect/expose/expose.go b/command/connect/expose/expose.go index ae84b2d7d..062a01afb 100644 --- a/command/connect/expose/expose.go +++ b/command/connect/expose/expose.go @@ -5,12 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/agent" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/command/intention" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { @@ -36,12 +36,12 @@ type cmd struct { func (c *cmd) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) c.flags.StringVar(&c.ingressGateway, "ingress-gateway", "", - "(Required) The name of the ingress gateway service to use. A namespace "+ - "can optionally be specified as a prefix via the 'namespace/service' format.") + "(Required) The name of the ingress gateway service to use. Namespace and partition "+ + "can optionally be specified as a prefix via the 'partition/namespace/service' format.") c.flags.StringVar(&c.service, "service", "", - "(Required) The name of destination service to expose. A namespace "+ - "can optionally be specified as a prefix via the 'namespace/service' format.") + "(Required) The name of destination service to expose. 
Namespace and partition "+ + "can optionally be specified as a prefix via the 'partition/namespace/service' format.") c.flags.IntVar(&c.port, "port", 0, "(Required) The listener port to use for the service on the Ingress gateway.") @@ -79,7 +79,7 @@ func (c *cmd) Run(args []string) int { c.UI.Error("A service name must be given via the -service flag.") return 1 } - svc, svcNamespace, err := intention.ParseIntentionTarget(c.service) + svc, svcNS, svcPart, err := intention.ParseIntentionTarget(c.service) if err != nil { c.UI.Error(fmt.Sprintf("Invalid service name: %s", err)) return 1 @@ -89,7 +89,7 @@ func (c *cmd) Run(args []string) int { c.UI.Error("An ingress gateway service must be given via the -ingress-gateway flag.") return 1 } - gateway, gatewayNamespace, err := intention.ParseIntentionTarget(c.ingressGateway) + gateway, gatewayNS, gatewayPart, err := intention.ParseIntentionTarget(c.ingressGateway) if err != nil { c.UI.Error(fmt.Sprintf("Invalid ingress gateway name: %s", err)) return 1 @@ -102,7 +102,9 @@ func (c *cmd) Run(args []string) int { // First get the config entry for the ingress gateway, if it exists. Don't error if it's a 404 as that // just means we'll need to create a new config entry. 
- conf, _, err := client.ConfigEntries().Get(api.IngressGateway, gateway, nil) + conf, _, err := client.ConfigEntries().Get( + api.IngressGateway, gateway, &api.QueryOptions{Partition: gatewayPart, Namespace: gatewayNS}, + ) if err != nil && !strings.Contains(err.Error(), agent.ConfigEntryNotFoundErr) { c.UI.Error(fmt.Sprintf("Error fetching existing ingress gateway configuration: %s", err)) return 1 @@ -111,7 +113,8 @@ func (c *cmd) Run(args []string) int { conf = &api.IngressGatewayConfigEntry{ Kind: api.IngressGateway, Name: gateway, - Namespace: gatewayNamespace, + Namespace: gatewayNS, + Partition: gatewayPart, } } @@ -127,7 +130,8 @@ func (c *cmd) Run(args []string) int { serviceIdx := -1 newService := api.IngressService{ Name: svc, - Namespace: svcNamespace, + Namespace: svcNS, + Partition: svcPart, Hosts: c.hosts, } for i, listener := range ingressConf.Listeners { @@ -145,7 +149,7 @@ func (c *cmd) Run(args []string) int { // Make sure the service isn't already exposed in this gateway for j, service := range listener.Services { - if service.Name == svc && namespaceMatch(service.Namespace, svcNamespace) { + if service.Name == svc && entMetaMatch(service.Namespace, service.Partition, svcNS, svcPart) { serviceIdx = j c.UI.Output(fmt.Sprintf("Updating service definition for %q on listener with port %d", c.service, listener.Port)) break @@ -170,7 +174,7 @@ func (c *cmd) Run(args []string) int { // Write the updated config entry using a check-and-set, so it fails if the entry // has been changed since we looked it up. 
- succeeded, _, err := client.ConfigEntries().CAS(ingressConf, ingressConf.GetModifyIndex(), nil) + succeeded, _, err := client.ConfigEntries().CAS(ingressConf, ingressConf.GetModifyIndex(), &api.WriteOptions{Partition: gatewayPart, Namespace: gatewayNS}) if err != nil { c.UI.Error(fmt.Sprintf("Error writing ingress config entry: %v", err)) return 1 @@ -194,12 +198,14 @@ func (c *cmd) Run(args []string) int { // Add the intention between the gateway service and the destination. ixn := &api.Intention{ - SourceName: gateway, - SourceNS: gatewayNamespace, - DestinationName: svc, - DestinationNS: svcNamespace, - SourceType: api.IntentionSourceConsul, - Action: api.IntentionActionAllow, + SourceName: gateway, + SourceNS: gatewayNS, + SourcePartition: gatewayPart, + DestinationName: svc, + DestinationNS: svcNS, + DestinationPartition: svcPart, + SourceType: api.IntentionSourceConsul, + Action: api.IntentionActionAllow, } if _, err = client.Connect().IntentionUpsert(ixn, nil); err != nil { c.UI.Error(fmt.Sprintf("Error upserting intention: %s", err)) @@ -210,17 +216,21 @@ func (c *cmd) Run(args []string) int { return 0 } -func namespaceMatch(a, b string) bool { - namespaceA := a - namespaceB := b - if namespaceA == "" { - namespaceA = structs.IntentionDefaultNamespace +func entMetaMatch(nsA, partitionA, nsB, partitionB string) bool { + if nsA == "" { + nsA = api.IntentionDefaultNamespace } - if namespaceB == "" { - namespaceB = structs.IntentionDefaultNamespace + if partitionA == "" { + partitionA = api.PartitionDefaultName + } + if nsB == "" { + nsB = api.IntentionDefaultNamespace + } + if partitionB == "" { + partitionB = api.PartitionDefaultName } - return namespaceA == namespaceB + return strings.EqualFold(partitionA, partitionB) && strings.EqualFold(nsA, nsB) } func (c *cmd) Synopsis() string { diff --git a/command/connect/expose/expose_test.go b/command/connect/expose/expose_test.go index f2984c3ac..a30b52d66 100644 --- a/command/connect/expose/expose_test.go +++ 
b/command/connect/expose/expose_test.go @@ -43,6 +43,7 @@ func TestConnectExpose(t *testing.T) { entry, _, err := client.ConfigEntries().Get(api.IngressGateway, "ingress", nil) require.NoError(err) ns := entry.(*api.IngressGatewayConfigEntry).Namespace + ap := entry.(*api.IngressGatewayConfigEntry).Partition expected := &api.IngressGatewayConfigEntry{ Kind: api.IngressGateway, Name: "ingress", @@ -55,6 +56,7 @@ func TestConnectExpose(t *testing.T) { { Name: "foo", Namespace: ns, + Partition: ap, }, }, }, @@ -95,6 +97,7 @@ func TestConnectExpose(t *testing.T) { { Name: "foo", Namespace: ns, + Partition: ap, }, }, }) @@ -283,6 +286,7 @@ func TestConnectExpose_existingConfig(t *testing.T) { ingressConf.Namespace = entryConf.Namespace for i, listener := range ingressConf.Listeners { listener.Services[0].Namespace = entryConf.Listeners[i].Services[0].Namespace + listener.Services[0].Partition = entryConf.Listeners[i].Services[0].Partition } ingressConf.CreateIndex = entry.GetCreateIndex() ingressConf.ModifyIndex = entry.GetModifyIndex() @@ -317,6 +321,7 @@ func TestConnectExpose_existingConfig(t *testing.T) { ingressConf.Listeners[1].Services = append(ingressConf.Listeners[1].Services, api.IngressService{ Name: "zoo", Namespace: entryConf.Listeners[1].Services[1].Namespace, + Partition: entryConf.Listeners[1].Services[1].Partition, Hosts: []string{"foo.com", "foo.net"}, }) ingressConf.CreateIndex = entry.GetCreateIndex() diff --git a/command/connect/redirecttraffic/redirect_traffic.go b/command/connect/redirecttraffic/redirect_traffic.go index 8d79e7599..6436c98cc 100644 --- a/command/connect/redirecttraffic/redirect_traffic.go +++ b/command/connect/redirecttraffic/redirect_traffic.go @@ -36,6 +36,7 @@ type cmd struct { client *api.Client // Flags. 
+ consulDNSIP string proxyUID string proxyID string proxyInboundPort int @@ -50,6 +51,7 @@ type cmd struct { func (c *cmd) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.StringVar(&c.consulDNSIP, "consul-dns-ip", "", "IP used to reach Consul DNS. If provided, DNS queries will be redirected to Consul.") c.flags.StringVar(&c.proxyUID, "proxy-uid", "", "The user ID of the proxy to exclude from traffic redirection.") c.flags.StringVar(&c.proxyID, "proxy-id", "", "The service ID of the proxy service registered with Consul.") c.flags.IntVar(&c.proxyInboundPort, "proxy-inbound-port", 0, "The inbound port that the proxy is listening on.") @@ -130,6 +132,7 @@ type trafficRedirectProxyConfig struct { // generateConfigFromFlags generates iptables.Config based on command flags. func (c *cmd) generateConfigFromFlags() (iptables.Config, error) { cfg := iptables.Config{ + ConsulDNSIP: c.consulDNSIP, ProxyUserID: c.proxyUID, ProxyInboundPort: c.proxyInboundPort, ProxyOutboundPort: c.proxyOutboundPort, diff --git a/command/connect/redirecttraffic/redirect_traffic_test.go b/command/connect/redirecttraffic/redirect_traffic_test.go index 5c31af036..e2ccf1c42 100644 --- a/command/connect/redirecttraffic/redirect_traffic_test.go +++ b/command/connect/redirecttraffic/redirect_traffic_test.go @@ -127,6 +127,35 @@ func TestGenerateConfigFromFlags(t *testing.T) { ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, }, }, + { + name: "proxyID with Consul DNS IP provided", + command: func() cmd { + var c cmd + c.init() + c.proxyUID = "1234" + c.proxyID = "test-proxy-id" + c.consulDNSIP = "10.0.34.16" + return c + }, + consulServices: []api.AgentServiceRegistration{ + { + Kind: api.ServiceKindConnectProxy, + ID: "test-proxy-id", + Name: "test-proxy", + Port: 20000, + Address: "1.1.1.1", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "foo", + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: "10.0.34.16", + ProxyUserID: "1234", + 
ProxyInboundPort: 20000, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + }, + }, { name: "proxyID with bind_port(string) provided", command: func() cmd { diff --git a/command/forceleave/forceleave.go b/command/forceleave/forceleave.go index 6b3672d39..513c17556 100644 --- a/command/forceleave/forceleave.go +++ b/command/forceleave/forceleave.go @@ -6,6 +6,7 @@ import ( "github.com/mitchellh/cli" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" ) @@ -21,14 +22,17 @@ type cmd struct { http *flags.HTTPFlags help string - //flags + // flags prune bool + wan bool } func (c *cmd) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) c.flags.BoolVar(&c.prune, "prune", false, "Remove agent completely from list of members") + c.flags.BoolVar(&c.wan, "wan", false, + "Exclusively leave the agent from the WAN serf pool.") c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.PartitionFlag()) @@ -54,12 +58,10 @@ func (c *cmd) Run(args []string) int { return 1 } - if c.prune { - err = client.Agent().ForceLeavePrune(nodes[0]) - } else { - err = client.Agent().ForceLeave(nodes[0]) - } - + err = client.Agent().ForceLeaveOpts(nodes[0], api.ForceLeaveOpts{ + Prune: c.prune, + WAN: c.wan, + }) if err != nil { c.UI.Error(fmt.Sprintf("Error force leaving: %s", err)) return 1 @@ -88,4 +90,5 @@ Usage: consul force-leave [options] name time before eventually reaping them. -prune Remove agent completely from list of members + -wan Exclusively leave the agent from the WAN serf pool. 
` diff --git a/command/forceleave/forceleave_test.go b/command/forceleave/forceleave_test.go index e13808854..f69059ca0 100644 --- a/command/forceleave/forceleave_test.go +++ b/command/forceleave/forceleave_test.go @@ -18,7 +18,6 @@ func TestForceLeaveCommand_noTabs(t *testing.T) { } } -// TODO(partitions): split this test and verify it works in partitions func TestForceLeaveCommand(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -62,7 +61,6 @@ func TestForceLeaveCommand(t *testing.T) { }) } -// TODO(partitions): split this test and verify it works in partitions func TestForceLeaveCommand_NoNodeWithName(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/command/intention/create/create.go b/command/intention/create/create.go index 2ecd9e60d..a890a0974 100644 --- a/command/intention/create/create.go +++ b/command/intention/create/create.go @@ -153,24 +153,26 @@ func (c *cmd) ixnsFromArgs(args []string) ([]*api.Intention, error) { return nil, fmt.Errorf("Must specify two arguments: source and destination") } - srcName, srcNamespace, err := intention.ParseIntentionTarget(args[0]) + srcName, srcNS, srcPart, err := intention.ParseIntentionTarget(args[0]) if err != nil { return nil, fmt.Errorf("Invalid intention source: %v", err) } - dstName, dstNamespace, err := intention.ParseIntentionTarget(args[1]) + dstName, dstNS, dstPart, err := intention.ParseIntentionTarget(args[1]) if err != nil { return nil, fmt.Errorf("Invalid intention destination: %v", err) } return []*api.Intention{{ - SourceNS: srcNamespace, - SourceName: srcName, - DestinationNS: dstNamespace, - DestinationName: dstName, - SourceType: api.IntentionSourceConsul, - Action: c.ixnAction(), - Meta: c.flagMeta, + SourcePartition: srcPart, + SourceNS: srcNS, + SourceName: srcName, + DestinationPartition: dstPart, + DestinationNS: dstNS, + DestinationName: dstName, + SourceType: api.IntentionSourceConsul, + Action: c.ixnAction(), + Meta: c.flagMeta, 
}}, nil } diff --git a/command/intention/helpers.go b/command/intention/helpers.go index c8a3823ce..c1d3a15b0 100644 --- a/command/intention/helpers.go +++ b/command/intention/helpers.go @@ -7,25 +7,28 @@ import ( "github.com/hashicorp/consul/api" ) -// ParseIntentionTarget parses a target of the form / and returns -// the two distinct parts. In some cases the namespace may be elided and this function -// will return the empty string for the namespace then. -func ParseIntentionTarget(input string) (name string, namespace string, err error) { - // Get the index to the '/'. If it doesn't exist, we have just a name - // so just set that and return. - idx := strings.IndexByte(input, '/') - if idx == -1 { - // let the agent do token based defaulting of the namespace - return input, "", nil +// ParseIntentionTarget parses a target of the form // and returns +// the distinct parts. In some cases the partition and namespace may be elided and this function +// will return the empty string for them then. +// If two parts are present, it is assumed they are namespace/name and not partition/name. 
+func ParseIntentionTarget(input string) (name string, ns string, partition string, err error) { + ss := strings.Split(input, "/") + switch len(ss) { + case 1: // Name only + name = ss[0] + return + case 2: // namespace/name + ns = ss[0] + name = ss[1] + return + case 3: // partition/namespace/name + partition = ss[0] + ns = ss[1] + name = ss[2] + return + default: + return "", "", "", fmt.Errorf("input can contain at most two '/'") } - - namespace = input[:idx] - name = input[idx+1:] - if strings.IndexByte(name, '/') != -1 { - return "", "", fmt.Errorf("target can contain at most one '/'") - } - - return name, namespace, nil } func GetFromArgs(client *api.Client, args []string) (*api.Intention, error) { diff --git a/command/join/join_test.go b/command/join/join_test.go index 555014a5d..48b89b32e 100644 --- a/command/join/join_test.go +++ b/command/join/join_test.go @@ -16,8 +16,7 @@ func TestJoinCommand_noTabs(t *testing.T) { } } -// TODO(partitions): split this test and verify it works in partitions -func TestJoinCommandJoin_lan(t *testing.T) { +func TestJoinCommandJoin_LAN(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } diff --git a/command/keyring/keyring.go b/command/keyring/keyring.go index acdb0476f..6c73c7429 100644 --- a/command/keyring/keyring.go +++ b/command/keyring/keyring.go @@ -5,10 +5,12 @@ import ( "fmt" "strings" + "github.com/mitchellh/cli" + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/agent/structs" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" - "github.com/mitchellh/cli" ) func New(ui cli.Ui) *cmd { @@ -185,21 +187,25 @@ func (c *cmd) Run(args []string) int { func formatResponse(response *consulapi.KeyringResponse, keys map[string]int) string { b := new(strings.Builder) b.WriteString("\n") - b.WriteString(poolName(response.Datacenter, response.WAN, response.Segment)) + b.WriteString(poolName(response.Datacenter, response.WAN, response.Partition, 
response.Segment)) b.WriteString(formatMessages(response.Messages)) b.WriteString(formatKeys(keys, response.NumNodes)) return strings.TrimRight(b.String(), "\n") } -func poolName(dc string, wan bool, segment string) string { +func poolName(dc string, wan bool, partition, segment string) string { pool := fmt.Sprintf("%s (LAN)", dc) if wan { pool = "WAN" } + + var suffix string if segment != "" { - segment = fmt.Sprintf(" [%s]", segment) + suffix = fmt.Sprintf(" [%s]", segment) + } else if !structs.IsDefaultPartition(partition) { + suffix = fmt.Sprintf(" [partition: %s]", partition) } - return fmt.Sprintf("%s%s:\n", pool, segment) + return fmt.Sprintf("%s%s:\n", pool, suffix) } func formatMessages(messages map[string]string) string { diff --git a/command/keyring/keyring_test.go b/command/keyring/keyring_test.go index a9f6d396f..166129fe0 100644 --- a/command/keyring/keyring_test.go +++ b/command/keyring/keyring_test.go @@ -4,10 +4,11 @@ import ( "strings" "testing" - "github.com/hashicorp/consul/agent" - consulapi "github.com/hashicorp/consul/api" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + consulapi "github.com/hashicorp/consul/api" ) func TestKeyringCommand_noTabs(t *testing.T) { @@ -195,9 +196,8 @@ func removeKey(t *testing.T, addr string, key string) { } func TestKeyringCommand_poolName(t *testing.T) { - require.Equal(t, "dc1 (LAN):\n", poolName("dc1", false, "")) - require.Equal(t, "dc1 (LAN) [segment1]:\n", poolName("dc1", false, "segment1")) - require.Equal(t, "WAN:\n", poolName("dc1", true, "")) + require.Equal(t, "dc1 (LAN):\n", poolName("dc1", false, "", "")) + require.Equal(t, "WAN:\n", poolName("dc1", true, "", "")) } func TestKeyringCommand_formatKeys(t *testing.T) { diff --git a/command/kv/get/kv_get.go b/command/kv/get/kv_get.go index 136202b6c..099aedb9f 100644 --- a/command/kv/get/kv_get.go +++ b/command/kv/get/kv_get.go @@ -199,6 +199,9 @@ func prettyKVPair(w io.Writer, pair *api.KVPair, 
base64EncodeValue bool) error { } else { fmt.Fprintf(tw, "Session\t%s\n", pair.Session) } + if pair.Partition != "" { + fmt.Fprintf(tw, "Partition\t%s\n", pair.Partition) + } if pair.Namespace != "" { fmt.Fprintf(tw, "Namespace\t%s\n", pair.Namespace) } diff --git a/command/kv/impexp/kvimpexp.go b/command/kv/impexp/kvimpexp.go index ed1472785..4f4c7e87e 100644 --- a/command/kv/impexp/kvimpexp.go +++ b/command/kv/impexp/kvimpexp.go @@ -11,6 +11,7 @@ type Entry struct { Flags uint64 `json:"flags"` Value string `json:"value"` Namespace string `json:"namespace,omitempty"` + Partition string `json:"partition,omitempty"` } func ToEntry(pair *api.KVPair) *Entry { @@ -19,5 +20,6 @@ func ToEntry(pair *api.KVPair) *Entry { Flags: pair.Flags, Value: base64.StdEncoding.EncodeToString(pair.Value), Namespace: pair.Namespace, + Partition: pair.Partition, } } diff --git a/command/lock/util_unix.go b/command/lock/util_unix.go index 99ba9247c..cc6902ff5 100644 --- a/command/lock/util_unix.go +++ b/command/lock/util_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package lock diff --git a/command/lock/util_windows.go b/command/lock/util_windows.go index b7d2c133c..14547e345 100644 --- a/command/lock/util_windows.go +++ b/command/lock/util_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package lock diff --git a/command/login/login_oss.go b/command/login/login_oss.go index c2fce854f..22466d56f 100644 --- a/command/login/login_oss.go +++ b/command/login/login_oss.go @@ -1,4 +1,5 @@ -//+build !consulent +//go:build !consulent +// +build !consulent package login diff --git a/command/login/login_test.go b/command/login/login_test.go index 8c9309b25..3d730548d 100644 --- a/command/login/login_test.go +++ b/command/login/login_test.go @@ -8,18 +8,18 @@ import ( "testing" "time" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "gopkg.in/square/go-jose.v2/jwt" + "github.com/hashicorp/consul/agent" 
"github.com/hashicorp/consul/agent/consul/authmethod/kubeauth" "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/acl" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" - "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" - "gopkg.in/square/go-jose.v2/jwt" ) func TestLoginCommand_noTabs(t *testing.T) { @@ -44,7 +44,7 @@ func TestLoginCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -241,7 +241,7 @@ func TestLoginCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -339,7 +339,7 @@ func TestLoginCommand_jwt(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -352,7 +352,7 @@ func TestLoginCommand_jwt(t *testing.T) { bearerTokenFile := filepath.Join(testDir, "bearer.token") // spin up a fake oidc server - oidcServer := startSSOTestServer(t) + oidcServer := oidcauthtest.Start(t) pubKey, privKey := oidcServer.SigningKeys() type mConfig = map[string]interface{} @@ -470,11 +470,3 @@ func TestLoginCommand_jwt(t *testing.T) { }) } } - -func startSSOTestServer(t *testing.T) *oidcauthtest.Server { - ports := freeport.MustTake(1) - return oidcauthtest.Start(t, oidcauthtest.WithPort( - ports[0], - func() { freeport.Return(ports) }, - )) -} diff --git a/command/logout/logout_test.go b/command/logout/logout_test.go index f0810a7a2..82e82bc03 100644 --- a/command/logout/logout_test.go +++ b/command/logout/logout_test.go @@ -35,7 +35,7 @@ func TestLogoutCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -165,7 +165,7 @@ func TestLogoutCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + 
initial_management = "root" } }`) diff --git a/command/members/members_test.go b/command/members/members_test.go index 7f8a6479d..cc4a21742 100644 --- a/command/members/members_test.go +++ b/command/members/members_test.go @@ -1,6 +1,7 @@ package members import ( + "encoding/csv" "fmt" "math/rand" "sort" @@ -175,6 +176,35 @@ func TestMembersCommand_verticalBar(t *testing.T) { } } +func decodeOutput(t *testing.T, data string) []map[string]string { + r := csv.NewReader(strings.NewReader(data)) + r.Comma = ' ' + r.TrimLeadingSpace = true + + lines, err := r.ReadAll() + require.NoError(t, err) + if len(lines) < 2 { + return nil + } + + var out []map[string]string + for i := 1; i < len(lines); i++ { + m := zip(t, lines[0], lines[i]) + out = append(out, m) + } + return out +} + +func zip(t *testing.T, k, v []string) map[string]string { + require.Equal(t, len(k), len(v)) + + m := make(map[string]string) + for i := 0; i < len(k); i++ { + m[k[i]] = v[i] + } + return m +} + func TestSortByMemberNamePartitionAndSegment(t *testing.T) { lib.SeedMathRand() diff --git a/command/snapshot/save/snapshot_save_test.go b/command/snapshot/save/snapshot_save_test.go index b1b57289a..79df0dfc6 100644 --- a/command/snapshot/save/snapshot_save_test.go +++ b/command/snapshot/save/snapshot_save_test.go @@ -5,19 +5,20 @@ import ( "fmt" "io/ioutil" "net/http" + "net/http/httptest" "os" "path/filepath" "strings" "sync/atomic" "testing" - "github.com/hashicorp/consul/agent" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" ) func TestSnapshotSaveCommand_noTabs(t *testing.T) { @@ -138,7 +139,7 @@ func TestSnapshotSaveCommand_TruncatedStream(t 
*testing.T) { var fakeResult atomic.Value // Run a fake webserver to pretend to be the snapshot API. - fakeAddr := lib.StartTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if req.URL.Path != "/v1/snapshot" { w.WriteHeader(http.StatusNotFound) return @@ -157,10 +158,11 @@ func TestSnapshotSaveCommand_TruncatedStream(t *testing.T) { data := raw.([]byte) _, _ = w.Write(data) })) + t.Cleanup(srv.Close) // Wait until the server is actually listening. retry.Run(t, func(r *retry.R) { - resp, err := http.Get("http://" + fakeAddr + "/not-real") + resp, err := srv.Client().Get(srv.URL + "/not-real") require.NoError(r, err) require.Equal(r, http.StatusNotFound, resp.StatusCode) }) @@ -179,7 +181,7 @@ func TestSnapshotSaveCommand_TruncatedStream(t *testing.T) { file := filepath.Join(dir, "backup.tgz") args := []string{ - "-http-addr=" + fakeAddr, // point to the fake + "-http-addr=" + srv.Listener.Addr().String(), // point to the fake file, } diff --git a/connect/proxy/listener_test.go b/connect/proxy/listener_test.go index 83cb39987..8a3006b69 100644 --- a/connect/proxy/listener_test.go +++ b/connect/proxy/listener_test.go @@ -112,15 +112,13 @@ func TestPublicListener(t *testing.T) { // Can't enable t.Parallel since we rely on the global metrics instance. ca := agConnect.TestCA(t, nil) - ports := freeport.MustTake(1) - defer freeport.Return(ports) - testApp := NewTestTCPServer(t) defer testApp.Close() + port := freeport.GetOne(t) cfg := PublicListenerConfig{ BindAddress: "127.0.0.1", - BindPort: ports[0], + BindPort: port, LocalServiceAddress: testApp.Addr().String(), HandshakeTimeoutMs: 100, LocalConnectTimeoutMs: 100, @@ -144,7 +142,7 @@ func TestPublicListener(t *testing.T) { // Proxy and backend are running, play the part of a TLS client using same // cert for now. 
conn, err := svc.Dial(context.Background(), &connect.StaticResolver{ - Addr: TestLocalAddr(ports[0]), + Addr: TestLocalAddr(port), CertURI: agConnect.TestSpiffeIDService(t, "db"), }) require.NoError(t, err) @@ -166,9 +164,6 @@ func TestUpstreamListener(t *testing.T) { // Can't enable t.Parallel since we rely on the global metrics instance. ca := agConnect.TestCA(t, nil) - ports := freeport.MustTake(1) - defer freeport.Return(ports) - // Run a test server that we can dial. testSvr := connect.NewTestServer(t, "db", ca) go func() { @@ -184,7 +179,7 @@ func TestUpstreamListener(t *testing.T) { DestinationName: "db", Config: map[string]interface{}{"connect_timeout_ms": 100}, LocalBindAddress: "localhost", - LocalBindPort: ports[0], + LocalBindPort: freeport.GetOne(t), } // Setup metrics to test they are recorded diff --git a/connect/proxy/proxy_test.go b/connect/proxy/proxy_test.go index 46274de9b..904e9c7c4 100644 --- a/connect/proxy/proxy_test.go +++ b/connect/proxy/proxy_test.go @@ -27,8 +27,7 @@ func TestProxy_public(t *testing.T) { t.Skip("too slow for testing.Short") } - ports := freeport.MustTake(2) - defer freeport.Return(ports) + ports := freeport.GetN(t, 2) a := agent.NewTestAgent(t, "") defer a.Shutdown() diff --git a/connect/proxy/testing.go b/connect/proxy/testing.go index cad243478..db27581d0 100644 --- a/connect/proxy/testing.go +++ b/connect/proxy/testing.go @@ -8,10 +8,10 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/consul/connect" - "github.com/hashicorp/consul/sdk/freeport" "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/connect" ) // TestLocalAddr makes a localhost address on the given port @@ -24,24 +24,17 @@ type TestTCPServer struct { l net.Listener stopped int32 accepted, closed, active int32 - returnPortsFn func() } // NewTestTCPServer opens as a listening socket on the given address and returns // a TestTCPServer serving requests to it. 
The server is already started and can // be stopped by calling Close(). func NewTestTCPServer(t testing.T) *TestTCPServer { - ports := freeport.MustTake(1) - addr := TestLocalAddr(ports[0]) - - l, err := net.Listen("tcp", addr) + l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - log.Printf("test tcp server listening on %s", addr) - s := &TestTCPServer{ - l: l, - returnPortsFn: func() { freeport.Return(ports) }, - } + log.Printf("test tcp server listening on %s", l.Addr()) + s := &TestTCPServer{l: l} go s.accept() return s @@ -53,10 +46,6 @@ func (s *TestTCPServer) Close() { if s.l != nil { s.l.Close() } - if s.returnPortsFn != nil { - s.returnPortsFn() - s.returnPortsFn = nil - } } // Addr returns the address that this server is listening on. diff --git a/connect/testing.go b/connect/testing.go index 30a517b61..d054c0dee 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -96,24 +96,21 @@ type TestServer struct { // Listening is closed when the listener is run. Listening chan struct{} - l net.Listener - returnPortsFn func() - stopFlag int32 - stopChan chan struct{} + l net.Listener + stopFlag int32 + stopChan chan struct{} } // NewTestServer returns a TestServer. It should be closed when test is // complete. 
func NewTestServer(t testing.T, service string, ca *structs.CARoot) *TestServer { - ports := freeport.MustTake(1) return &TestServer{ - Service: service, - CA: ca, - stopChan: make(chan struct{}), - TLSCfg: TestTLSConfig(t, service, ca), - Addr: fmt.Sprintf("127.0.0.1:%d", ports[0]), - Listening: make(chan struct{}), - returnPortsFn: func() { freeport.Return(ports) }, + Service: service, + CA: ca, + stopChan: make(chan struct{}), + TLSCfg: TestTLSConfig(t, service, ca), + Addr: fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t)), + Listening: make(chan struct{}), } } @@ -190,10 +187,6 @@ func (s *TestServer) Close() error { if s.l != nil { s.l.Close() } - if s.returnPortsFn != nil { - s.returnPortsFn() - s.returnPortsFn = nil - } close(s.stopChan) } return nil diff --git a/go.mod b/go.mod index 2bcc92566..09d867c7c 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/frankban/quicktest v1.11.0 // indirect github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.3.5 - github.com/google/go-cmp v0.5.2 + github.com/google/go-cmp v0.5.6 github.com/google/go-querystring v1.0.0 // indirect github.com/google/gofuzz v1.2.0 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 @@ -50,13 +50,13 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 - github.com/hashicorp/mdns v1.0.4 // indirect - github.com/hashicorp/memberlist v0.2.4 + github.com/hashicorp/memberlist v0.3.0 github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 github.com/hashicorp/raft v1.3.2 github.com/hashicorp/raft-autopilot v0.1.5 - github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea - github.com/hashicorp/serf v0.9.6-0.20210609195804-2b5dd0cd2de9 + github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 // indirect + github.com/hashicorp/raft-boltdb/v2 v2.2.0 + github.com/hashicorp/serf v0.9.6 github.com/hashicorp/vault/api 
v1.0.5-0.20200717191844-f687267c8086 github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493 @@ -79,15 +79,16 @@ require ( github.com/prometheus/client_golang v1.4.0 github.com/rboyer/safeio v0.2.1 github.com/ryanuber/columnize v2.1.0+incompatible - github.com/shirou/gopsutil/v3 v3.20.10 - github.com/stretchr/testify v1.6.1 + github.com/shirou/gopsutil/v3 v3.21.10 + github.com/stretchr/testify v1.7.0 + go.etcd.io/bbolt v1.3.5 go.opencensus.io v0.22.0 // indirect go.uber.org/goleak v1.1.10 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 + golang.org/x/sys v0.0.0-20211013075003-97ac67df715c golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e google.golang.org/api v0.9.0 // indirect google.golang.org/appengine v1.6.0 // indirect diff --git a/go.sum b/go.sum index d9e5ab42f..53aed7fcb 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af 
h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -59,6 +59,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -148,8 +149,9 @@ github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod 
h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= @@ -184,8 +186,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -279,22 +282,25 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg= -github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0 
h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.2 h1:j2tqHqFnDdWCepLxzuo3b6WzS2krIweBrvEoqBbWMTo= github.com/hashicorp/raft v1.3.2/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft-autopilot v0.1.5 h1:onEfMH5uHVdXQqtas36zXUHEZxLdsJVu/nXHLcLdL1I= github.com/hashicorp/raft-autopilot v0.1.5/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= -github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hashicorp/serf v0.9.6-0.20210609195804-2b5dd0cd2de9 h1:lCZfMBDn/Puwg9VosHMf/9p9jNDYYkbzVjb4jYjVfqU= -github.com/hashicorp/serf v0.9.6-0.20210609195804-2b5dd0cd2de9/go.mod h1:qapjppkpNXHYTyzx+HqkyWGGkmUxafHjuspm/Bqb2Jc= +github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= +github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 h1:Ye8SofeDHJzu9xvvaMmpMkqHELWW7rTcXwdUR0CWW48= +github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= +github.com/hashicorp/raft-boltdb/v2 v2.2.0 
h1:/CVN9LSAcH50L3yp2TsPFIpeyHn1m3VF6kiutlDE3Nw= +github.com/hashicorp/raft-boltdb/v2 v2.2.0/go.mod h1:SgPUD5TP20z/bswEr210SnkUFvQP/YjKV95aaiTbeMQ= +github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 h1:OKsyxKi2sNmqm1Gv93adf2AID2FOBFdCbbZn9fGtIdg= github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 h1:e1ok06zGrWJW91rzRroyl5nRNqraaBe4d5hiKcVZuHM= @@ -344,6 +350,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -470,8 +478,8 @@ github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06q github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= 
-github.com/shirou/gopsutil/v3 v3.20.10 h1:7zomV9HJv6UGk225YtvEa5+camNLpbua3MAz/GqiVJY= -github.com/shirou/gopsutil/v3 v3.20.10/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -499,10 +507,15 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= +github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -515,6 +528,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -605,6 +620,7 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -614,15 +630,16 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6rNY2pbRfbIMVnepueo= -golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/go-sso/oidcauth/oidcauthtest/testing.go b/internal/go-sso/oidcauth/oidcauthtest/testing.go index cdf27a19c..8e20fb575 100644 --- a/internal/go-sso/oidcauth/oidcauthtest/testing.go +++ b/internal/go-sso/oidcauth/oidcauthtest/testing.go @@ -24,11 +24,11 @@ import ( "sync" "time" - "github.com/hashicorp/consul/internal/go-sso/oidcauth/internal/strutil" - "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" "gopkg.in/square/go-jose.v2" "gopkg.in/square/go-jose.v2/jwt" + + "github.com/hashicorp/consul/internal/go-sso/oidcauth/internal/strutil" ) // Server is local server the mocks the endpoints used by the OIDC and @@ -54,25 +54,17 @@ type Server struct { disableUserInfo bool } -type startOption struct { - port int - returnFunc func() -} - -// WithPort is a option for Start that lets the caller control the port -// allocation. The returnFunc parameter is used when the provider is stopped to -// return the port in whatever bookkeeping system the caller wants to use. -func WithPort(port int, returnFunc func()) startOption { - return startOption{ - port: port, - returnFunc: returnFunc, - } +type TestingT interface { + require.TestingT + Helper() + Cleanup(func()) } // Start creates a disposable Server. If the port provided is // zero it will bind to a random free port, otherwise the provided port is // used. 
-func Start(t testing.T, options ...startOption) *Server { +func Start(t TestingT) *Server { + t.Helper() s := &Server{ allowedRedirectURIs: []string{ "https://example.com", @@ -89,23 +81,9 @@ func Start(t testing.T, options ...startOption) *Server { require.NoError(t, err) s.jwks = jwks - var ( - port int - returnFunc func() - ) - for _, option := range options { - if option.port > 0 { - port = option.port - returnFunc = option.returnFunc - } - } - - s.httpServer = httptestNewUnstartedServerWithPort(s, port) + s.httpServer = httptest.NewUnstartedServer(s) s.httpServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) s.httpServer.StartTLS() - if returnFunc != nil { - t.Cleanup(returnFunc) - } t.Cleanup(s.httpServer.Close) cert := s.httpServer.Certificate() diff --git a/lib/testing_httpserver.go b/lib/testing_httpserver.go deleted file mode 100644 index df5e1f414..000000000 --- a/lib/testing_httpserver.go +++ /dev/null @@ -1,36 +0,0 @@ -package lib - -import ( - "net/http" - - "github.com/hashicorp/consul/ipaddr" - "github.com/hashicorp/consul/sdk/freeport" - "github.com/mitchellh/go-testing-interface" -) - -// StartTestServer fires up a web server on a random unused port to serve the -// given handler body. The address it is listening on is returned. When the -// test case terminates the server will be stopped via cleanup functions. -// -// We can't directly use httptest.Server here because that only thinks a port -// is free if it's not bound. Consul tests frequently reserve ports via -// `sdk/freeport` so you can have one part of the test try to use a port and -// _know_ nothing is listening. If you simply assumed unbound ports were free -// you'd end up with test cross-talk and weirdness. 
-func StartTestServer(t testing.T, handler http.Handler) string { - ports := freeport.MustTake(1) - t.Cleanup(func() { - freeport.Return(ports) - }) - - addr := ipaddr.FormatAddressPort("127.0.0.1", ports[0]) - - server := &http.Server{Addr: addr, Handler: handler} - t.Cleanup(func() { - server.Close() - }) - - go server.ListenAndServe() - - return addr -} diff --git a/proto/pbautoconf/auto_config.pb.go b/proto/pbautoconf/auto_config.pb.go index 2fb0b0132..8a4c25b5d 100644 --- a/proto/pbautoconf/auto_config.pb.go +++ b/proto/pbautoconf/auto_config.pb.go @@ -37,6 +37,8 @@ type AutoConfigRequest struct { Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` // Segment is the network segment that the requester would like to join Segment string `protobuf:"bytes,4,opt,name=Segment,proto3" json:"Segment,omitempty"` + // Partition is the partition that the requester would like to join + Partition string `protobuf:"bytes,8,opt,name=Partition,proto3" json:"Partition,omitempty"` // JWT is a signed JSON Web Token used to authorize the request JWT string `protobuf:"bytes,5,opt,name=JWT,proto3" json:"JWT,omitempty"` // ConsulToken is a Consul ACL token that the agent requesting the @@ -104,6 +106,13 @@ func (m *AutoConfigRequest) GetSegment() string { return "" } +func (m *AutoConfigRequest) GetPartition() string { + if m != nil { + return m.Partition + } + return "" +} + func (m *AutoConfigRequest) GetJWT() string { if m != nil { return m.JWT @@ -212,29 +221,30 @@ func init() { } var fileDescriptor_ccc5af992e5daf69 = []byte{ - // 347 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xc1, 0x4a, 0xc3, 0x30, - 0x18, 0xc7, 0x8d, 0x9b, 0x9b, 0x4b, 0x41, 0x66, 0x76, 0x09, 0x13, 0xcb, 0xd8, 0x41, 0x86, 0x48, - 0x2b, 0x13, 0xaf, 0xc2, 0x8c, 0x1e, 0xf4, 0xe0, 0x21, 0x1b, 0x08, 0x5e, 0xa4, 0xeb, 0xb2, 0xad, - 0xb8, 0x25, 0xb5, 0xf9, 0x02, 0x3e, 0x8a, 0x6f, 0xe0, 0xab, 0x78, 0xd4, 0x37, 0x90, 0xf9, 
0x22, - 0xd2, 0xb4, 0x95, 0x20, 0x9e, 0xfa, 0xef, 0xff, 0xf7, 0xfb, 0xe0, 0x6b, 0x13, 0xdc, 0x4f, 0x33, - 0x05, 0x2a, 0x4c, 0xa7, 0x91, 0x01, 0x15, 0x2b, 0x39, 0x0f, 0xf3, 0xf0, 0x98, 0xa7, 0x64, 0x11, - 0x58, 0x48, 0x76, 0x2b, 0xd6, 0x3d, 0xa8, 0xec, 0x82, 0x87, 0xae, 0xd6, 0x3d, 0x74, 0xa0, 0x14, - 0x31, 0x84, 0xe5, 0xb3, 0xc0, 0xfd, 0x37, 0x84, 0xf7, 0x47, 0x06, 0x14, 0xb3, 0x33, 0x5c, 0x3c, - 0x1b, 0xa1, 0x81, 0xf8, 0x18, 0x5f, 0x45, 0x10, 0xc5, 0x42, 0x82, 0xc8, 0x28, 0xea, 0xa1, 0x41, - 0x8b, 0x3b, 0x0d, 0x21, 0xb8, 0x7e, 0xa7, 0x66, 0x82, 0x6e, 0x5b, 0x62, 0x33, 0xa1, 0xb8, 0x39, - 0x16, 0x8b, 0xb5, 0x90, 0x40, 0xeb, 0xb6, 0xae, 0x5e, 0x49, 0x1b, 0xd7, 0x6e, 0xef, 0x27, 0x74, - 0xc7, 0xb6, 0x79, 0x24, 0x3d, 0xec, 0x31, 0x25, 0xb5, 0x59, 0x4d, 0xd4, 0x93, 0x90, 0xb4, 0x61, - 0x89, 0x5b, 0xe5, 0x33, 0x6c, 0xcc, 0x69, 0xb3, 0x98, 0x61, 0x63, 0xde, 0xff, 0x44, 0x98, 0xb8, - 0x9b, 0xea, 0x54, 0x49, 0x2d, 0xc8, 0x11, 0x6e, 0x14, 0x8d, 0x5d, 0xd3, 0x1b, 0xee, 0x05, 0xe5, - 0xe7, 0x97, 0x5e, 0x49, 0xc9, 0x31, 0x6e, 0xb2, 0x11, 0x57, 0x0a, 0xb4, 0xdd, 0xda, 0x1b, 0xb6, - 0x83, 0xea, 0x4f, 0x94, 0x3d, 0xaf, 0x04, 0x72, 0x8e, 0x3d, 0x26, 0x32, 0x48, 0xe6, 0x49, 0x1c, - 0x81, 0xa0, 0x35, 0xeb, 0x77, 0x7e, 0xfd, 0x1b, 0xad, 0x8d, 0x98, 0xe5, 0x06, 0x77, 0x3d, 0x72, - 0x8a, 0x3b, 0xd7, 0x2f, 0x90, 0x45, 0x6c, 0xe4, 0xb4, 0x9a, 0xd6, 0x7b, 0xb5, 0x41, 0x8b, 0xff, - 0x87, 0x2e, 0x2f, 0xde, 0x37, 0x3e, 0xfa, 0xd8, 0xf8, 0xe8, 0x6b, 0xe3, 0xa3, 0xd7, 0x6f, 0x7f, - 0xeb, 0xe1, 0x64, 0x91, 0xc0, 0xd2, 0x4c, 0x83, 0x58, 0xad, 0xc3, 0x65, 0xa4, 0x97, 0x49, 0xac, - 0xb2, 0x34, 0x3f, 0x33, 0x6d, 0x56, 0xe1, 0xdf, 0x5b, 0x31, 0x6d, 0xd8, 0xe6, 0xec, 0x27, 0x00, - 0x00, 0xff, 0xff, 0xe2, 0x1d, 0x6e, 0x48, 0x30, 0x02, 0x00, 0x00, + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcf, 0x4a, 0xc3, 0x40, + 0x10, 0xc6, 0x8d, 0xad, 0xfd, 0xb3, 0x01, 0xa9, 0xdb, 0xcb, 0x52, 0x35, 0x94, 0x1e, 0xa4, 0x88, + 0x24, 0x52, 
0xf1, 0x2a, 0xd4, 0xe8, 0x41, 0x0f, 0x22, 0xdb, 0x82, 0xe0, 0x45, 0xd2, 0x74, 0xdb, + 0x06, 0xdb, 0x9d, 0x98, 0x9d, 0x80, 0x8f, 0xe2, 0x0b, 0x09, 0x1e, 0xf5, 0x0d, 0xa4, 0xbe, 0x88, + 0x64, 0x93, 0xe8, 0x22, 0x9e, 0xf2, 0xe5, 0xf7, 0xfb, 0xe6, 0x30, 0x93, 0x90, 0x5e, 0x9c, 0x00, + 0x82, 0x17, 0x4f, 0x82, 0x14, 0x21, 0x04, 0x39, 0xf3, 0xb2, 0xf0, 0x90, 0xa5, 0x68, 0xee, 0x6a, + 0x49, 0x1b, 0xa5, 0xeb, 0xec, 0x96, 0xed, 0xdc, 0x7b, 0x66, 0xad, 0xb3, 0x6f, 0x48, 0x29, 0x42, + 0xf4, 0x8a, 0x67, 0xae, 0x7b, 0xaf, 0x16, 0xd9, 0x19, 0xa6, 0x08, 0xbe, 0x9e, 0xe1, 0xe2, 0x29, + 0x15, 0x0a, 0xa9, 0x43, 0xc8, 0x45, 0x80, 0x41, 0x28, 0x24, 0x8a, 0x84, 0x59, 0x5d, 0xab, 0xdf, + 0xe4, 0x06, 0xa1, 0x94, 0x54, 0x6f, 0x60, 0x2a, 0xd8, 0xa6, 0x36, 0x3a, 0x53, 0x46, 0xea, 0x23, + 0x31, 0x5f, 0x09, 0x89, 0xac, 0xaa, 0x71, 0xf9, 0x4a, 0xf7, 0x48, 0xf3, 0x36, 0x48, 0x30, 0xc2, + 0x08, 0x24, 0x6b, 0x68, 0xf7, 0x0b, 0x68, 0x8b, 0x54, 0xae, 0xef, 0xc6, 0x6c, 0x4b, 0xf3, 0x2c, + 0xd2, 0x2e, 0xb1, 0x7d, 0x90, 0x2a, 0x5d, 0x8e, 0xe1, 0x51, 0x48, 0x56, 0xd3, 0xc6, 0x44, 0xd9, + 0x8c, 0x3f, 0xe2, 0xac, 0x9e, 0xcf, 0xf8, 0x23, 0xde, 0xfb, 0xb0, 0x08, 0x35, 0xf7, 0x50, 0x31, + 0x48, 0x25, 0xe8, 0x01, 0xa9, 0xe5, 0x44, 0x2f, 0x61, 0x0f, 0xb6, 0xdd, 0xe2, 0x38, 0x45, 0xaf, + 0xb0, 0xf4, 0x90, 0xd4, 0xfd, 0x21, 0x07, 0x40, 0xa5, 0x77, 0xb2, 0x07, 0x2d, 0xb7, 0xbc, 0x53, + 0xc1, 0x79, 0x59, 0xa0, 0xa7, 0xc4, 0xf6, 0x45, 0x82, 0xd1, 0x2c, 0x0a, 0x03, 0x14, 0xac, 0xa2, + 0xfb, 0xed, 0x9f, 0xfe, 0x95, 0x52, 0xa9, 0x98, 0x66, 0x0d, 0x6e, 0xf6, 0xe8, 0x31, 0x69, 0x5f, + 0x3e, 0x63, 0x12, 0xf8, 0x43, 0x83, 0x2a, 0x56, 0xed, 0x56, 0xfa, 0x4d, 0xfe, 0x9f, 0x3a, 0x3f, + 0x7b, 0x5b, 0x3b, 0xd6, 0xfb, 0xda, 0xb1, 0x3e, 0xd7, 0x8e, 0xf5, 0xf2, 0xe5, 0x6c, 0xdc, 0x1f, + 0xcd, 0x23, 0x5c, 0xa4, 0x13, 0x37, 0x84, 0x95, 0xb7, 0x08, 0xd4, 0x22, 0x0a, 0x21, 0x89, 0xb3, + 0x2f, 0xaa, 0xd2, 0xa5, 0xf7, 0xf7, 0x9f, 0x99, 0xd4, 0x34, 0x39, 0xf9, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x65, 0x57, 0x2e, 0x30, 0x4e, 0x02, 0x00, 
0x00, } func (m *AutoConfigRequest) Marshal() (dAtA []byte, err error) { @@ -261,6 +271,13 @@ func (m *AutoConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Partition) > 0 { + i -= len(m.Partition) + copy(dAtA[i:], m.Partition) + i = encodeVarintAutoConfig(dAtA, i, uint64(len(m.Partition))) + i-- + dAtA[i] = 0x42 + } if len(m.CSR) > 0 { i -= len(m.CSR) copy(dAtA[i:], m.CSR) @@ -419,6 +436,10 @@ func (m *AutoConfigRequest) Size() (n int) { if l > 0 { n += 1 + l + sovAutoConfig(uint64(l)) } + l = len(m.Partition) + if l > 0 { + n += 1 + l + sovAutoConfig(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -682,6 +703,38 @@ func (m *AutoConfigRequest) Unmarshal(dAtA []byte) error { } m.CSR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAutoConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAutoConfig + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAutoConfig + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Partition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAutoConfig(dAtA[iNdEx:]) diff --git a/proto/pbautoconf/auto_config.proto b/proto/pbautoconf/auto_config.proto index aa7f4e381..3a4db6575 100644 --- a/proto/pbautoconf/auto_config.proto +++ b/proto/pbautoconf/auto_config.proto @@ -7,7 +7,7 @@ option go_package = "github.com/hashicorp/consul/proto/pbautoconf"; import "proto/pbconfig/config.proto"; import "proto/pbconnect/connect.proto"; 
-// AutoConfigRequest is the data structure to be sent along with the +// AutoConfigRequest is the data structure to be sent along with the // AutoConfig.InitialConfiguration RPC message AutoConfigRequest { // Datacenter is the local datacenter name. This wont actually be set by clients @@ -15,21 +15,24 @@ message AutoConfigRequest { // the leader. If it ever happens to be set and differs from the local datacenters // name then an error should be returned. string Datacenter = 1; - + // Node is the node name that the requester would like to assume // the identity of. string Node = 2; - + // Segment is the network segment that the requester would like to join string Segment = 4; + // Partition is the partition that the requester would like to join + string Partition = 8; + // JWT is a signed JSON Web Token used to authorize the request string JWT = 5; - + // ConsulToken is a Consul ACL token that the agent requesting the // configuration already has. string ConsulToken = 6; - + // CSR is a certificate signing request to be used when generating the // agents TLS certificate string CSR = 7; @@ -39,13 +42,13 @@ message AutoConfigRequest { message AutoConfigResponse { // Config is the partial Consul configuration to inject into the agents own configuration config.Config Config = 1; - + // CARoots is the current list of Connect CA Roots connect.CARoots CARoots = 2; // Certificate is the TLS certificate issued for the agent connect.IssuedCert Certificate = 3; - + // ExtraCACertificates holds non-Connect certificates that may be necessary // to verify TLS connections with the Consul servers repeated string ExtraCACertificates = 4; -} \ No newline at end of file +} diff --git a/proto/pbautoconf/auto_config_oss.go b/proto/pbautoconf/auto_config_oss.go new file mode 100644 index 000000000..461bfb1a7 --- /dev/null +++ b/proto/pbautoconf/auto_config_oss.go @@ -0,0 +1,8 @@ +//go:build !consulent +// +build !consulent + +package pbautoconf + +func (req *AutoConfigRequest) 
PartitionOrDefault() string { + return "" +} diff --git a/proto/pbcommon/common.pb.go b/proto/pbcommon/common.pb.go index 0217bf819..3b29930d5 100644 --- a/proto/pbcommon/common.pb.go +++ b/proto/pbcommon/common.pb.go @@ -391,6 +391,10 @@ type QueryMeta struct { // Having `discovery_max_stale` on the agent can affect whether // the request was served by a leader. ConsistencyLevel string `protobuf:"bytes,4,opt,name=ConsistencyLevel,proto3" json:"ConsistencyLevel,omitempty"` + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. + ResultsFilteredByACLs bool `protobuf:"varint,7,opt,name=ResultsFilteredByACLs,proto3" json:"ResultsFilteredByACLs,omitempty"` } func (m *QueryMeta) Reset() { *m = QueryMeta{} } @@ -454,6 +458,13 @@ func (m *QueryMeta) GetConsistencyLevel() string { return "" } +func (m *QueryMeta) GetResultsFilteredByACLs() bool { + if m != nil { + return m.ResultsFilteredByACLs + } + return false +} + // EnterpriseMeta contains metadata that is only used by the Enterprise version // of Consul. 
type EnterpriseMeta struct { @@ -509,46 +520,49 @@ func init() { func init() { proto.RegisterFile("proto/pbcommon/common.proto", fileDescriptor_a6f5ac44994d718c) } var fileDescriptor_a6f5ac44994d718c = []byte{ - // 620 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x41, 0x4f, 0xd4, 0x40, - 0x14, 0xde, 0xe2, 0xb2, 0x6e, 0xdf, 0x02, 0xc1, 0x09, 0x31, 0x15, 0x4d, 0x97, 0x6c, 0x8c, 0x21, - 0x44, 0xb7, 0x09, 0xde, 0xf0, 0x04, 0x0b, 0x1a, 0xe2, 0x56, 0x74, 0xc4, 0x90, 0x78, 0x9b, 0xed, - 0xbe, 0xed, 0x4e, 0x6c, 0x3b, 0x75, 0x3a, 0x85, 0xe5, 0x1f, 0x78, 0xf4, 0x48, 0x3c, 0xf9, 0x43, - 0xfc, 0x01, 0x1c, 0x39, 0x7a, 0x42, 0x65, 0xff, 0x81, 0xbf, 0xc0, 0x74, 0x5a, 0xa0, 0x08, 0x18, - 0x3c, 0xed, 0x7e, 0xdf, 0x7c, 0xdf, 0xeb, 0x9b, 0xf7, 0xbe, 0x16, 0xee, 0xc7, 0x52, 0x28, 0xe1, - 0xc4, 0x3d, 0x4f, 0x84, 0xa1, 0x88, 0x9c, 0xfc, 0xa7, 0xad, 0x59, 0x52, 0xcb, 0xd1, 0xbc, 0xed, - 0x0b, 0xe1, 0x07, 0xe8, 0x68, 0xb6, 0x97, 0x0e, 0x9c, 0x7e, 0x2a, 0x99, 0xe2, 0xa7, 0xba, 0xf9, - 0x39, 0x5f, 0xf8, 0x22, 0x2f, 0x94, 0xfd, 0xcb, 0xd9, 0x56, 0x08, 0x26, 0x65, 0x03, 0xb5, 0x19, - 0xf5, 0x71, 0x44, 0x1c, 0x68, 0x74, 0x24, 0x32, 0x85, 0x1a, 0x5a, 0xc6, 0x82, 0xb1, 0x58, 0x5d, - 0x9b, 0xfe, 0x7d, 0xdc, 0x34, 0x7b, 0x38, 0x8a, 0xe5, 0x4a, 0xeb, 0x49, 0x8b, 0x96, 0x15, 0x99, - 0xc1, 0x15, 0x7d, 0x3e, 0xd8, 0xcf, 0x0d, 0x13, 0x57, 0x1a, 0x4a, 0x8a, 0xd6, 0x32, 0xcc, 0x6e, - 0x33, 0xe9, 0xa3, 0x5a, 0x67, 0x8a, 0x79, 0x18, 0x29, 0x94, 0xc4, 0x06, 0x38, 0x47, 0xfa, 0xa1, - 0x26, 0x2d, 0x31, 0xad, 0x25, 0x98, 0xda, 0x91, 0x5c, 0x21, 0xc5, 0x8f, 0x29, 0x26, 0x8a, 0xcc, - 0xc1, 0xe4, 0xb6, 0xf8, 0x80, 0x51, 0x21, 0xcd, 0xc1, 0x4a, 0xf5, 0xd3, 0xd7, 0xa6, 0xd1, 0xda, - 0x81, 0x06, 0x45, 0xd6, 0xff, 0xa7, 0x94, 0x3c, 0x86, 0x3b, 0x99, 0x80, 0x4b, 0xec, 0x88, 0x28, - 0xe1, 0x89, 0xc2, 0x48, 0xe9, 0xde, 0xeb, 0xf4, 0xf2, 0x41, 0x51, 0xf8, 0x4b, 0x15, 0xa6, 0xde, - 0xa4, 0x28, 0xf7, 0xb7, 0xe2, 0x6c, 0xa6, 0xc9, 0x35, 0xa5, 0x1f, 0xc2, 
0xb4, 0xcb, 0x23, 0x2d, - 0x2c, 0x8d, 0x84, 0x5e, 0x24, 0xc9, 0x0b, 0x98, 0x72, 0xd9, 0x48, 0x13, 0xdb, 0x3c, 0x44, 0xeb, - 0xd6, 0x82, 0xb1, 0xd8, 0x58, 0xbe, 0xd7, 0xce, 0x37, 0xd8, 0x3e, 0xdd, 0x60, 0x7b, 0xbd, 0xd8, - 0xe0, 0x5a, 0xfd, 0xf0, 0xb8, 0x59, 0x39, 0xf8, 0xd1, 0x34, 0xe8, 0x05, 0x63, 0x36, 0xba, 0xd5, - 0x20, 0x10, 0x7b, 0x6f, 0x15, 0x0b, 0xd0, 0xaa, 0xea, 0x2b, 0x94, 0x98, 0xab, 0x6f, 0x3a, 0x79, - 0xcd, 0x4d, 0xc9, 0x3c, 0xd4, 0xdf, 0x25, 0xd8, 0x61, 0xde, 0x10, 0xad, 0x9a, 0x16, 0x9d, 0x61, - 0xb2, 0x05, 0xb3, 0x2e, 0x1b, 0xe9, 0xaa, 0xa7, 0x5d, 0x59, 0xb7, 0x6f, 0xde, 0xf6, 0x25, 0x33, - 0x79, 0x06, 0x35, 0x97, 0x8d, 0x56, 0x7d, 0xb4, 0xea, 0x37, 0x2f, 0x53, 0x58, 0xc8, 0x23, 0x98, - 0x71, 0xd3, 0x44, 0x51, 0xdc, 0x65, 0x01, 0xef, 0x33, 0x85, 0x96, 0xa9, 0xfb, 0xfd, 0x8b, 0xcd, - 0x06, 0xad, 0x9f, 0xba, 0x39, 0xd8, 0x90, 0x52, 0x48, 0x0b, 0xfe, 0x63, 0xd0, 0x65, 0x23, 0xb9, - 0x0b, 0xb5, 0xe7, 0x3c, 0xc8, 0xf2, 0xd9, 0xd0, 0xeb, 0x2e, 0x50, 0x11, 0x8e, 0x6f, 0x06, 0x98, - 0x7a, 0x29, 0x2e, 0x2a, 0x96, 0x25, 0xa3, 0xf4, 0xfe, 0xd0, 0x1c, 0x90, 0x0d, 0x68, 0x74, 0x59, - 0xa2, 0x3a, 0x22, 0x52, 0xcc, 0xcb, 0xe3, 0x76, 0xc3, 0x4e, 0xca, 0x3e, 0xb2, 0x00, 0x8d, 0x97, - 0x91, 0xd8, 0x8b, 0xba, 0xc8, 0xfa, 0x28, 0x75, 0x72, 0xea, 0xb4, 0x4c, 0x91, 0x25, 0x98, 0x3d, - 0xdb, 0xa9, 0xb7, 0xdf, 0xc5, 0x5d, 0x0c, 0x74, 0x32, 0x4c, 0x7a, 0x89, 0x2f, 0xda, 0xef, 0xc2, - 0xcc, 0x46, 0xf6, 0xa6, 0xc5, 0x92, 0x27, 0xa8, 0xaf, 0xf0, 0x00, 0xcc, 0x57, 0x2c, 0xc4, 0x24, - 0x66, 0x1e, 0x16, 0x01, 0x3f, 0x27, 0xb2, 0xd3, 0xd7, 0x4c, 0x2a, 0xae, 0x43, 0x30, 0x91, 0x9f, - 0x9e, 0x11, 0x6b, 0xdd, 0xc3, 0x5f, 0x76, 0xe5, 0xf0, 0xc4, 0x36, 0x8e, 0x4e, 0x6c, 0xe3, 0xe7, - 0x89, 0x6d, 0x7c, 0x1e, 0xdb, 0x95, 0x83, 0xb1, 0x5d, 0x39, 0x1a, 0xdb, 0x95, 0xef, 0x63, 0xbb, - 0xf2, 0x7e, 0xc9, 0xe7, 0x6a, 0x98, 0xf6, 0xda, 0x9e, 0x08, 0x9d, 0x21, 0x4b, 0x86, 0xdc, 0x13, - 0x32, 0x76, 0x3c, 0x11, 0x25, 0x69, 0xe0, 0x5c, 0xfc, 0xd4, 0xf5, 0x6a, 0x1a, 0x3f, 0xfd, 0x13, - 0x00, 0x00, 
0xff, 0xff, 0x9c, 0xf6, 0xbd, 0xcc, 0x03, 0x05, 0x00, 0x00, + // 657 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4e, 0xdb, 0x4a, + 0x18, 0x8d, 0xb9, 0x21, 0xd8, 0x13, 0x40, 0xb9, 0x23, 0xee, 0x95, 0x2f, 0xb7, 0x72, 0x90, 0x55, + 0x55, 0x08, 0xb5, 0xb1, 0x44, 0xbb, 0xa2, 0x2b, 0x12, 0x68, 0x05, 0x8d, 0x4b, 0x3b, 0xa5, 0x42, + 0xea, 0x6e, 0x62, 0x7f, 0x71, 0xac, 0x3a, 0x1e, 0x77, 0x66, 0x0c, 0xc9, 0x1b, 0x74, 0xd9, 0x25, + 0xea, 0xaa, 0x8f, 0xc3, 0x92, 0x65, 0x57, 0xb4, 0x25, 0x6f, 0xd0, 0x07, 0xa8, 0x2a, 0x8f, 0x0d, + 0x98, 0x02, 0x15, 0x5d, 0x25, 0xe7, 0xcc, 0x39, 0xdf, 0x7c, 0x7f, 0x63, 0xf4, 0x7f, 0xc2, 0x99, + 0x64, 0x4e, 0xd2, 0xf3, 0xd8, 0x70, 0xc8, 0x62, 0x27, 0xff, 0x69, 0x29, 0x16, 0xd7, 0x72, 0xb4, + 0x68, 0x05, 0x8c, 0x05, 0x11, 0x38, 0x8a, 0xed, 0xa5, 0x7d, 0xc7, 0x4f, 0x39, 0x95, 0xe1, 0x99, + 0x6e, 0x71, 0x21, 0x60, 0x01, 0xcb, 0x03, 0x65, 0xff, 0x72, 0xd6, 0x1e, 0x22, 0x83, 0xd0, 0xbe, + 0xdc, 0x8a, 0x7d, 0x18, 0x61, 0x07, 0xd5, 0x3b, 0x1c, 0xa8, 0x04, 0x05, 0x4d, 0x6d, 0x49, 0x5b, + 0xae, 0xb6, 0xe7, 0xbe, 0x9f, 0x34, 0x8d, 0x1e, 0x8c, 0x12, 0xbe, 0x66, 0x3f, 0xb0, 0x49, 0x59, + 0x91, 0x19, 0x5c, 0xe6, 0x87, 0xfd, 0x71, 0x6e, 0x98, 0xba, 0xd6, 0x50, 0x52, 0xd8, 0xab, 0xa8, + 0xb1, 0x4b, 0x79, 0x00, 0x72, 0x83, 0x4a, 0xea, 0x41, 0x2c, 0x81, 0x63, 0x0b, 0xa1, 0x0b, 0xa4, + 0x2e, 0x35, 0x48, 0x89, 0xb1, 0x57, 0xd0, 0xec, 0x1e, 0x0f, 0x25, 0x10, 0x78, 0x97, 0x82, 0x90, + 0x78, 0x01, 0x4d, 0xef, 0xb2, 0xb7, 0x10, 0x17, 0xd2, 0x1c, 0xac, 0x55, 0xdf, 0x7f, 0x6a, 0x6a, + 0xf6, 0x1e, 0xaa, 0x13, 0xa0, 0xfe, 0x6f, 0xa5, 0xf8, 0x3e, 0xfa, 0x3b, 0x13, 0x84, 0x1c, 0x3a, + 0x2c, 0x16, 0xa1, 0x90, 0x10, 0x4b, 0x95, 0xbb, 0x4e, 0xae, 0x1e, 0x14, 0x81, 0x3f, 0x56, 0xd1, + 0xec, 0xcb, 0x14, 0xf8, 0x78, 0x27, 0xc9, 0x7a, 0x2a, 0x6e, 0x08, 0x7d, 0x17, 0xcd, 0xb9, 0x61, + 0xac, 0x84, 0xa5, 0x96, 0x90, 0xcb, 0x24, 0x7e, 0x8a, 0x66, 0x5d, 0x3a, 0x52, 0xc4, 0x6e, 0x38, + 0x04, 0xf3, 0xaf, 0x25, 
0x6d, 0xb9, 0xbe, 0xfa, 0x5f, 0x2b, 0x9f, 0x60, 0xeb, 0x6c, 0x82, 0xad, + 0x8d, 0x62, 0x82, 0x6d, 0xfd, 0xe8, 0xa4, 0x59, 0x39, 0xfc, 0xd2, 0xd4, 0xc8, 0x25, 0x63, 0xd6, + 0xba, 0xf5, 0x28, 0x62, 0x07, 0xaf, 0x24, 0x8d, 0xc0, 0xac, 0xaa, 0x12, 0x4a, 0xcc, 0xf5, 0x95, + 0x4e, 0xdf, 0x50, 0x29, 0x5e, 0x44, 0xfa, 0x6b, 0x01, 0x1d, 0xea, 0x0d, 0xc0, 0xac, 0x29, 0xd1, + 0x39, 0xc6, 0x3b, 0xa8, 0xe1, 0xd2, 0x91, 0x8a, 0x7a, 0x96, 0x95, 0x39, 0x73, 0xfb, 0xb4, 0xaf, + 0x98, 0xf1, 0x63, 0x54, 0x73, 0xe9, 0x68, 0x3d, 0x00, 0x53, 0xbf, 0x7d, 0x98, 0xc2, 0x82, 0xef, + 0xa1, 0x79, 0x37, 0x15, 0x92, 0xc0, 0x3e, 0x8d, 0x42, 0x9f, 0x4a, 0x30, 0x0d, 0x95, 0xef, 0x2f, + 0x6c, 0xd6, 0x68, 0x75, 0xeb, 0x56, 0x7f, 0x93, 0x73, 0xc6, 0x4d, 0xf4, 0x07, 0x8d, 0x2e, 0x1b, + 0xf1, 0xbf, 0xa8, 0xf6, 0x24, 0x8c, 0xb2, 0xfd, 0xac, 0xab, 0x71, 0x17, 0xa8, 0x58, 0x8e, 0x1f, + 0x1a, 0x32, 0xd4, 0x50, 0x5c, 0x90, 0x34, 0xdb, 0x8c, 0xd2, 0xfb, 0x21, 0x39, 0xc0, 0x9b, 0xa8, + 0xde, 0xa5, 0x42, 0x76, 0x58, 0x2c, 0xa9, 0x97, 0xaf, 0xdb, 0x2d, 0x33, 0x29, 0xfb, 0xf0, 0x12, + 0xaa, 0x3f, 0x8b, 0xd9, 0x41, 0xdc, 0x05, 0xea, 0x03, 0x57, 0x9b, 0xa3, 0x93, 0x32, 0x85, 0x57, + 0x50, 0xe3, 0x7c, 0xa6, 0xde, 0xb8, 0x0b, 0xfb, 0x10, 0xa9, 0xcd, 0x30, 0xc8, 0x15, 0x1e, 0x3f, + 0x42, 0xff, 0x10, 0x10, 0x69, 0x24, 0x45, 0x5e, 0x0f, 0xf8, 0xed, 0xf1, 0x7a, 0xa7, 0x2b, 0xd4, + 0x68, 0x75, 0x72, 0xfd, 0x61, 0x5e, 0xf4, 0x76, 0x55, 0x9f, 0x6e, 0xd4, 0xb6, 0xab, 0x7a, 0xad, + 0x31, 0x63, 0x77, 0xd1, 0xfc, 0x66, 0xf6, 0x56, 0x13, 0x1e, 0x0a, 0x50, 0x4d, 0xb8, 0x83, 0x8c, + 0xe7, 0x74, 0x08, 0x22, 0xa1, 0x1e, 0x14, 0x4f, 0xe4, 0x82, 0xc8, 0x4e, 0x5f, 0x50, 0x2e, 0x43, + 0xb5, 0x46, 0x53, 0xf9, 0xe9, 0x39, 0xd1, 0xee, 0x1e, 0x7d, 0xb3, 0x2a, 0x47, 0xa7, 0x96, 0x76, + 0x7c, 0x6a, 0x69, 0x5f, 0x4f, 0x2d, 0xed, 0xc3, 0xc4, 0xaa, 0x1c, 0x4e, 0xac, 0xca, 0xf1, 0xc4, + 0xaa, 0x7c, 0x9e, 0x58, 0x95, 0x37, 0x2b, 0x41, 0x28, 0x07, 0x69, 0xaf, 0xe5, 0xb1, 0xa1, 0x33, + 0xa0, 0x62, 0x10, 0x7a, 0x8c, 0x27, 0x8e, 0xc7, 0x62, 0x91, 
0x46, 0xce, 0xe5, 0x8f, 0x65, 0xaf, + 0xa6, 0xf0, 0xc3, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xab, 0xfa, 0x4f, 0xec, 0x45, 0x05, 0x00, + 0x00, } func (m *RaftIndex) Marshal() (dAtA []byte, err error) { @@ -818,6 +832,16 @@ func (m *QueryMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ResultsFilteredByACLs { + i-- + if m.ResultsFilteredByACLs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } if len(m.ConsistencyLevel) > 0 { i -= len(m.ConsistencyLevel) copy(dAtA[i:], m.ConsistencyLevel) @@ -1014,6 +1038,9 @@ func (m *QueryMeta) Size() (n int) { if l > 0 { n += 1 + l + sovCommon(uint64(l)) } + if m.ResultsFilteredByACLs { + n += 2 + } return n } @@ -1872,6 +1899,26 @@ func (m *QueryMeta) Unmarshal(dAtA []byte) error { } m.ConsistencyLevel = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsFilteredByACLs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResultsFilteredByACLs = bool(v != 0) default: iNdEx = preIndex skippy, err := skipCommon(dAtA[iNdEx:]) diff --git a/proto/pbcommon/common.proto b/proto/pbcommon/common.proto index 2a0d2c6dc..a771fb40d 100644 --- a/proto/pbcommon/common.proto +++ b/proto/pbcommon/common.proto @@ -150,6 +150,14 @@ message QueryMeta { // Having `discovery_max_stale` on the agent can affect whether // the request was served by a leader. string ConsistencyLevel = 4; + + // Reserved for NotModified and Backend. + reserved 5, 6; + + // ResultsFilteredByACLs is true when some of the query's results were + // filtered out by enforcing ACLs. It may be false because nothing was + // removed, or because the endpoint does not yet support this flag. 
+ bool ResultsFilteredByACLs = 7; } // EnterpriseMeta contains metadata that is only used by the Enterprise version @@ -159,4 +167,4 @@ message EnterpriseMeta { string Namespace = 1; // Partition in which the entity exists. string Partition = 2; -} \ No newline at end of file +} diff --git a/proto/pbcommon/common_oss.go b/proto/pbcommon/common_oss.go index 024f207fa..35588b673 100644 --- a/proto/pbcommon/common_oss.go +++ b/proto/pbcommon/common_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package pbcommon diff --git a/proto/pbconfig/config.pb.go b/proto/pbconfig/config.pb.go index 90e743dc9..458fc5482 100644 --- a/proto/pbconfig/config.pb.go +++ b/proto/pbconfig/config.pb.go @@ -27,6 +27,7 @@ type Config struct { PrimaryDatacenter string `protobuf:"bytes,2,opt,name=PrimaryDatacenter,proto3" json:"PrimaryDatacenter,omitempty"` NodeName string `protobuf:"bytes,3,opt,name=NodeName,proto3" json:"NodeName,omitempty"` SegmentName string `protobuf:"bytes,4,opt,name=SegmentName,proto3" json:"SegmentName,omitempty"` + Partition string `protobuf:"bytes,9,opt,name=Partition,proto3" json:"Partition,omitempty"` ACL *ACL `protobuf:"bytes,5,opt,name=ACL,proto3" json:"ACL,omitempty"` AutoEncrypt *AutoEncrypt `protobuf:"bytes,6,opt,name=AutoEncrypt,proto3" json:"AutoEncrypt,omitempty"` Gossip *Gossip `protobuf:"bytes,7,opt,name=Gossip,proto3" json:"Gossip,omitempty"` @@ -97,6 +98,13 @@ func (m *Config) GetSegmentName() string { return "" } +func (m *Config) GetPartition() string { + if m != nil { + return m.Partition + } + return "" +} + func (m *Config) GetACL() *ACL { if m != nil { return m.ACL @@ -453,9 +461,9 @@ func (m *ACL) GetMSPDisableBootstrap() bool { } type ACLTokens struct { - Master string `protobuf:"bytes,1,opt,name=Master,proto3" json:"Master,omitempty"` + InitialManagement string `protobuf:"bytes,1,opt,name=InitialManagement,proto3" json:"InitialManagement,omitempty"` Replication string `protobuf:"bytes,2,opt,name=Replication,proto3" 
json:"Replication,omitempty"` - AgentMaster string `protobuf:"bytes,3,opt,name=AgentMaster,proto3" json:"AgentMaster,omitempty"` + AgentRecovery string `protobuf:"bytes,3,opt,name=AgentRecovery,proto3" json:"AgentRecovery,omitempty"` Default string `protobuf:"bytes,4,opt,name=Default,proto3" json:"Default,omitempty"` Agent string `protobuf:"bytes,5,opt,name=Agent,proto3" json:"Agent,omitempty"` ManagedServiceProvider []*ACLServiceProviderToken `protobuf:"bytes,6,rep,name=ManagedServiceProvider,proto3" json:"ManagedServiceProvider,omitempty"` @@ -497,9 +505,9 @@ func (m *ACLTokens) XXX_DiscardUnknown() { var xxx_messageInfo_ACLTokens proto.InternalMessageInfo -func (m *ACLTokens) GetMaster() string { +func (m *ACLTokens) GetInitialManagement() string { if m != nil { - return m.Master + return m.InitialManagement } return "" } @@ -511,9 +519,9 @@ func (m *ACLTokens) GetReplication() string { return "" } -func (m *ACLTokens) GetAgentMaster() string { +func (m *ACLTokens) GetAgentRecovery() string { if m != nil { - return m.AgentMaster + return m.AgentRecovery } return "" } @@ -679,58 +687,59 @@ func init() { func init() { proto.RegisterFile("proto/pbconfig/config.proto", fileDescriptor_aefa824db7b74d77) } var fileDescriptor_aefa824db7b74d77 = []byte{ - // 802 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdd, 0x8e, 0x22, 0x45, - 0x14, 0xb6, 0xa7, 0x77, 0x7a, 0x86, 0x83, 0x6e, 0x76, 0x6b, 0x57, 0xec, 0xf8, 0x83, 0xa4, 0x63, - 0x36, 0xa3, 0x31, 0x83, 0xc1, 0x68, 0xd4, 0x78, 0xc3, 0xc0, 0x46, 0x71, 0x01, 0x49, 0x37, 0xae, - 0x89, 0x37, 0xa6, 0x69, 0x0e, 0x50, 0xb1, 0xa9, 0xea, 0x54, 0x17, 0x3b, 0xe1, 0x4d, 0x7c, 0x0d, - 0xaf, 0x7d, 0x01, 0x2f, 0x7d, 0x04, 0x1d, 0x5f, 0xc0, 0x47, 0x30, 0xf5, 0xd3, 0x4d, 0xf7, 0x08, - 0x57, 0x70, 0xbe, 0xef, 0xab, 0x53, 0xe7, 0xaf, 0x4e, 0xc3, 0x3b, 0x99, 0xe0, 0x92, 0x77, 0xb3, - 0x45, 0xc2, 0xd9, 0x8a, 0xae, 0xbb, 0xe6, 0xe7, 0x5a, 0xa3, 0xc4, 0x33, 0x56, 0xf0, 0xdb, 
0x19, - 0x78, 0x03, 0xfd, 0x97, 0xb4, 0x01, 0x86, 0xb1, 0x8c, 0x13, 0x64, 0x12, 0x85, 0xef, 0x74, 0x9c, - 0xab, 0x46, 0x58, 0x41, 0xc8, 0xc7, 0xf0, 0x78, 0x26, 0xe8, 0x36, 0x16, 0xfb, 0x8a, 0xec, 0x4c, - 0xcb, 0xfe, 0x4f, 0x90, 0xb7, 0xe1, 0x72, 0xca, 0x97, 0x38, 0x8d, 0xb7, 0xe8, 0xbb, 0x5a, 0x54, - 0xda, 0xa4, 0x03, 0xcd, 0x08, 0xd7, 0x5b, 0x64, 0x52, 0xd3, 0x0f, 0x34, 0x5d, 0x85, 0xc8, 0x7b, - 0xe0, 0xf6, 0x07, 0x63, 0xff, 0xbc, 0xe3, 0x5c, 0x35, 0x7b, 0xcd, 0x6b, 0x1b, 0x7a, 0x7f, 0x30, - 0x0e, 0x15, 0x4e, 0x3e, 0x83, 0x66, 0x7f, 0x27, 0xf9, 0x73, 0x96, 0x88, 0x7d, 0x26, 0x7d, 0x4f, - 0xcb, 0x9e, 0x94, 0xb2, 0x03, 0x15, 0x56, 0x75, 0xe4, 0x19, 0x78, 0xdf, 0xf0, 0x3c, 0xa7, 0x99, - 0x7f, 0xa1, 0x4f, 0x3c, 0x2c, 0x4e, 0x18, 0x34, 0xb4, 0xac, 0xba, 0x7d, 0x3e, 0x8e, 0xfc, 0xcb, - 0xfa, 0xed, 0xf3, 0x71, 0x14, 0x2a, 0x3c, 0x58, 0x15, 0x6e, 0xc8, 0x17, 0x00, 0xd6, 0x37, 0xe5, - 0x4c, 0x97, 0xac, 0xd9, 0xf3, 0xeb, 0x4e, 0x0f, 0x7c, 0x58, 0xd1, 0x92, 0x00, 0x5e, 0x0f, 0x51, - 0x8a, 0xfd, 0x77, 0x9c, 0xb2, 0x71, 0x7f, 0xea, 0x9f, 0x75, 0xdc, 0xab, 0x46, 0x58, 0xc3, 0x02, - 0x09, 0x8f, 0xee, 0xfb, 0x20, 0x8f, 0xc0, 0x7d, 0x81, 0x7b, 0xdb, 0x1d, 0xf5, 0x97, 0x3c, 0x83, - 0x87, 0x2f, 0x51, 0xd0, 0xd5, 0x7e, 0xc4, 0x12, 0xbe, 0xa5, 0x6c, 0xad, 0x7b, 0x72, 0x19, 0xde, - 0x43, 0x0f, 0xba, 0xef, 0x77, 0x72, 0xcd, 0x95, 0xce, 0xad, 0xea, 0x0a, 0x34, 0xf8, 0xdb, 0xd1, - 0xd9, 0x1f, 0xd1, 0x3b, 0xc7, 0xf4, 0xa4, 0x07, 0x4f, 0x0d, 0x12, 0xa1, 0x78, 0x85, 0xe2, 0x5b, - 0x9e, 0x4b, 0xa6, 0xba, 0x6a, 0xa2, 0x38, 0xca, 0xa9, 0xec, 0x07, 0x34, 0xdb, 0xa0, 0x88, 0x76, - 0x54, 0x62, 0x6e, 0x07, 0xa4, 0x86, 0xa9, 0x71, 0x9c, 0x50, 0xf6, 0x12, 0x45, 0xae, 0x6a, 0x6b, - 0x66, 0xa4, 0x82, 0x90, 0xaf, 0xc0, 0x9f, 0x09, 0x5c, 0xa1, 0x30, 0xbe, 0x6b, 0xfe, 0xce, 0xf5, - 0xdd, 0x27, 0xf9, 0xe0, 0x77, 0x57, 0xcf, 0x17, 0xf1, 0xe1, 0xe2, 0x39, 0x8b, 0x17, 0x29, 0x2e, - 0x6d, 0x72, 0x85, 0x49, 0xde, 0x85, 0xc6, 0x8c, 0xa7, 0x34, 0xd9, 0xcf, 0xe7, 0x63, 0x3b, 0xe4, - 0x07, 0x40, 0x9d, 0x0b, 0x79, 
0x8a, 0x8a, 0x33, 0xa1, 0x17, 0xa6, 0x1a, 0xfb, 0x39, 0xff, 0x05, - 0x99, 0xa2, 0x4c, 0xcc, 0xa5, 0xad, 0x1f, 0x18, 0xbf, 0x65, 0xc6, 0x8d, 0x8e, 0x51, 0x3d, 0xb0, - 0x12, 0x21, 0x1f, 0xc0, 0x1b, 0x43, 0x5c, 0xc5, 0xbb, 0x54, 0x5a, 0x89, 0xa7, 0x25, 0x75, 0x90, - 0x7c, 0x02, 0x4f, 0x4c, 0x90, 0x2f, 0x70, 0x3f, 0xa6, 0x79, 0xa1, 0xbd, 0xd0, 0xf1, 0x1f, 0xa3, - 0xc8, 0x87, 0xe0, 0xe9, 0x18, 0x72, 0x3b, 0xd1, 0x8f, 0x2b, 0xef, 0xc9, 0x10, 0xa1, 0x15, 0x90, - 0x2f, 0xa1, 0x35, 0xc4, 0x4c, 0x60, 0x12, 0x4b, 0x5c, 0xfe, 0x3c, 0xa4, 0xb9, 0xae, 0x86, 0x4a, - 0xa6, 0xa1, 0x62, 0xb9, 0x39, 0xf3, 0x9d, 0xf0, 0xcd, 0x83, 0xa2, 0x22, 0x20, 0x9f, 0x43, 0xcb, - 0x5c, 0xae, 0x5d, 0xcd, 0x54, 0x97, 0x72, 0x89, 0x2c, 0x41, 0x1f, 0x74, 0x68, 0x27, 0x58, 0x95, - 0xcf, 0x24, 0x9a, 0x59, 0x4f, 0x37, 0x9c, 0xcb, 0x5c, 0x8a, 0x38, 0xf3, 0x9b, 0x26, 0x9f, 0x23, - 0x54, 0xf0, 0xaf, 0x03, 0x8d, 0x32, 0x74, 0xd2, 0x02, 0x6f, 0x12, 0xe7, 0x87, 0x95, 0x65, 0x2d, - 0xb5, 0x64, 0x42, 0xcc, 0x52, 0x9a, 0xc4, 0xfa, 0x71, 0x9a, 0x1e, 0x56, 0x21, 0xa5, 0xe8, 0xaf, - 0x91, 0x49, 0x7b, 0xdc, 0x74, 0xb2, 0x0a, 0xa9, 0x3e, 0xdb, 0xe2, 0xdb, 0x66, 0x16, 0x26, 0x79, - 0x0a, 0xe7, 0x5a, 0x68, 0xdb, 0x68, 0x0c, 0xf2, 0x23, 0xb4, 0x26, 0x31, 0x8b, 0xd7, 0xb8, 0x54, - 0x43, 0x47, 0x13, 0x9c, 0x09, 0xfe, 0x8a, 0x2e, 0x51, 0xf8, 0x5e, 0xc7, 0xbd, 0x6a, 0xf6, 0xde, - 0xaf, 0x54, 0xfe, 0x9e, 0x42, 0x67, 0x13, 0x9e, 0x38, 0x1e, 0xfc, 0x00, 0x6f, 0x9d, 0x38, 0xa2, - 0xa6, 0xaa, 0x9f, 0x24, 0x98, 0xe7, 0x5c, 0x8c, 0x86, 0xc5, 0xda, 0x3e, 0x20, 0x6a, 0x22, 0x23, - 0x4c, 0x04, 0xca, 0xd1, 0xd0, 0x16, 0xa1, 0xb4, 0x03, 0x5a, 0xdb, 0xa3, 0x6a, 0xb9, 0xa8, 0xbd, - 0x67, 0x9e, 0x82, 0x5e, 0x02, 0x2d, 0xf0, 0x86, 0xd3, 0x28, 0x2a, 0x17, 0x94, 0xb5, 0x54, 0xfa, - 0xa3, 0x99, 0x82, 0x5d, 0x0d, 0x1b, 0x43, 0x5d, 0xd5, 0x4f, 0x53, 0x7e, 0xab, 0x9c, 0x3c, 0xd0, - 0x4e, 0x4a, 0xfb, 0xe6, 0xeb, 0x3f, 0xee, 0xda, 0xce, 0x9f, 0x77, 0x6d, 0xe7, 0xaf, 0xbb, 0xb6, - 0xf3, 0xeb, 0x3f, 0xed, 0xd7, 0x7e, 0xfa, 0x68, 0x4d, 0xe5, 0x66, 
0xb7, 0xb8, 0x4e, 0xf8, 0xb6, - 0xbb, 0x89, 0xf3, 0x0d, 0x4d, 0xb8, 0xc8, 0xd4, 0x57, 0x2a, 0xdf, 0xa5, 0xdd, 0xfa, 0xb7, 0x6b, - 0xe1, 0x69, 0xfb, 0xd3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x54, 0x4a, 0x4e, 0xf1, 0xd4, 0x06, - 0x00, 0x00, + // 823 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xc6, 0xf1, 0xd6, 0x6d, 0x26, 0xb0, 0xda, 0x9d, 0x5d, 0x8a, 0xc5, 0x4f, 0x88, 0x2c, 0xb4, + 0x2a, 0x08, 0xb5, 0xa8, 0x08, 0x04, 0x88, 0x9b, 0x34, 0x59, 0x41, 0xd8, 0x34, 0x44, 0x76, 0x58, + 0x24, 0x6e, 0x90, 0xe3, 0x9c, 0x24, 0x23, 0x9c, 0x19, 0x6b, 0x3c, 0xe9, 0xca, 0xaf, 0xc0, 0x13, + 0xf0, 0x2e, 0xbc, 0x00, 0x77, 0xf0, 0x08, 0x50, 0x5e, 0x64, 0x75, 0x66, 0xc6, 0x8e, 0xdd, 0x26, + 0x57, 0xc9, 0xf9, 0xbe, 0x6f, 0xce, 0x9c, 0x33, 0xe7, 0xc7, 0xe4, 0xbd, 0x4c, 0x0a, 0x25, 0x2e, + 0xb2, 0x79, 0x22, 0xf8, 0x92, 0xad, 0x2e, 0xcc, 0xcf, 0xb9, 0x46, 0xa9, 0x67, 0xac, 0xe0, 0xef, + 0x16, 0xf1, 0x06, 0xfa, 0x2f, 0xed, 0x12, 0x32, 0x8c, 0x55, 0x9c, 0x00, 0x57, 0x20, 0x7d, 0xa7, + 0xe7, 0x9c, 0xb5, 0xc3, 0x1a, 0x42, 0x3f, 0x25, 0x8f, 0xa7, 0x92, 0x6d, 0x62, 0x59, 0xd4, 0x64, + 0x2d, 0x2d, 0xbb, 0x4f, 0xd0, 0x77, 0xc9, 0xc9, 0x44, 0x2c, 0x60, 0x12, 0x6f, 0xc0, 0x77, 0xb5, + 0xa8, 0xb2, 0x69, 0x8f, 0x74, 0x22, 0x58, 0x6d, 0x80, 0x2b, 0x4d, 0x3f, 0xd0, 0x74, 0x1d, 0xa2, + 0xef, 0x93, 0xf6, 0x34, 0x96, 0x8a, 0x29, 0x26, 0xb8, 0xdf, 0xd6, 0xfc, 0x0e, 0xa0, 0x1f, 0x10, + 0xb7, 0x3f, 0x18, 0xfb, 0x47, 0x3d, 0xe7, 0xac, 0x73, 0xd9, 0x39, 0xb7, 0x89, 0xf5, 0x07, 0xe3, + 0x10, 0x71, 0xfa, 0x05, 0xe9, 0xf4, 0xb7, 0x4a, 0x3c, 0xe7, 0x89, 0x2c, 0x32, 0xe5, 0x7b, 0x5a, + 0xf6, 0xa4, 0x92, 0xed, 0xa8, 0xb0, 0xae, 0xa3, 0xcf, 0x88, 0xf7, 0x9d, 0xc8, 0x73, 0x96, 0xf9, + 0xc7, 0xfa, 0xc4, 0xc3, 0xf2, 0x84, 0x41, 0x43, 0xcb, 0xe2, 0xed, 0xb3, 0x71, 0xe4, 0x9f, 0x34, + 0x6f, 0x9f, 0x8d, 0xa3, 0x10, 0xf1, 0x60, 0x59, 0xba, 0xa1, 0x5f, 0x11, 0x62, 0x7d, 0x63, 0x16, + 0x8e, 0xd6, 0xfb, 0x4d, 0xa7, 0x3b, 0x3e, 
0xac, 0x69, 0x69, 0x40, 0xde, 0x0c, 0x41, 0xc9, 0xe2, + 0x07, 0xc1, 0xf8, 0xb8, 0x3f, 0xf1, 0x5b, 0x3d, 0xf7, 0xac, 0x1d, 0x36, 0xb0, 0x40, 0x91, 0x47, + 0x77, 0x7d, 0xd0, 0x47, 0xc4, 0x7d, 0x01, 0x85, 0xad, 0x1d, 0xfe, 0xa5, 0xcf, 0xc8, 0xc3, 0x97, + 0x20, 0xd9, 0xb2, 0x18, 0xf1, 0x44, 0x6c, 0x18, 0x5f, 0xe9, 0x8a, 0x9d, 0x84, 0x77, 0xd0, 0x9d, + 0xee, 0xc7, 0xad, 0x5a, 0x09, 0xd4, 0xb9, 0x75, 0x5d, 0x89, 0x06, 0xff, 0x39, 0x3a, 0xfb, 0x3d, + 0x7a, 0x67, 0x9f, 0x9e, 0x5e, 0x92, 0xa7, 0x06, 0x89, 0x40, 0xde, 0x80, 0xfc, 0x5e, 0xe4, 0x8a, + 0x63, 0xcd, 0x4d, 0x14, 0x7b, 0x39, 0xcc, 0x7e, 0xc0, 0xb2, 0x35, 0xc8, 0x68, 0xcb, 0x14, 0xe4, + 0xb6, 0x7d, 0x1a, 0x18, 0x36, 0xeb, 0x35, 0xe3, 0x2f, 0x41, 0xe6, 0xf8, 0xb6, 0xa6, 0x83, 0x6a, + 0x08, 0xfd, 0x86, 0xf8, 0x53, 0x09, 0x4b, 0x90, 0xc6, 0x77, 0xc3, 0xdf, 0x91, 0xbe, 0xfb, 0x20, + 0x1f, 0xfc, 0xe9, 0xea, 0xfe, 0xa2, 0x3e, 0x39, 0x7e, 0xce, 0xe3, 0x79, 0x0a, 0x0b, 0x9b, 0x5c, + 0x69, 0xea, 0xf6, 0x14, 0x29, 0x4b, 0x8a, 0xd9, 0x6c, 0x6c, 0x47, 0x60, 0x07, 0xe0, 0xb9, 0x50, + 0xa4, 0x80, 0x9c, 0x09, 0xbd, 0x34, 0x71, 0x28, 0x66, 0xe2, 0x37, 0xe0, 0x48, 0x99, 0x98, 0x2b, + 0x5b, 0x8f, 0x9f, 0x78, 0xc5, 0x8d, 0x1b, 0x1d, 0x23, 0x8e, 0x5f, 0x85, 0xd0, 0x8f, 0xc8, 0x5b, + 0x43, 0x58, 0xc6, 0xdb, 0x54, 0x59, 0x89, 0xa7, 0x25, 0x4d, 0x90, 0x7e, 0x46, 0x9e, 0x98, 0x20, + 0x5f, 0x40, 0x31, 0x66, 0x79, 0xa9, 0x3d, 0xd6, 0xf1, 0xef, 0xa3, 0xe8, 0xc7, 0xc4, 0xd3, 0x31, + 0xe4, 0xb6, 0xa3, 0x1f, 0xd7, 0xe6, 0xc9, 0x10, 0xa1, 0x15, 0xd0, 0xaf, 0xc9, 0xe9, 0x10, 0x32, + 0x09, 0x49, 0xac, 0x60, 0xf1, 0xeb, 0x90, 0xe5, 0xfa, 0x35, 0x30, 0x19, 0x3d, 0xa2, 0x57, 0x2d, + 0xdf, 0x09, 0xdf, 0xde, 0x29, 0x6a, 0x02, 0xfa, 0x25, 0x39, 0x35, 0x97, 0x6b, 0x57, 0x53, 0xac, + 0x52, 0xae, 0x80, 0x27, 0xe0, 0x13, 0x1d, 0xda, 0x01, 0x16, 0xf3, 0xb9, 0x8e, 0xa6, 0xd6, 0xd3, + 0x95, 0x10, 0x2a, 0x57, 0x32, 0xce, 0xfc, 0x8e, 0xc9, 0x67, 0x0f, 0x15, 0xfc, 0xde, 0x22, 0xed, + 0x2a, 0x74, 0x5c, 0x5a, 0x23, 0xce, 0x14, 0x8b, 0xd3, 0xeb, 0x98, 0xc7, 0x2b, 
0xc0, 0x0d, 0x63, + 0xe7, 0xe3, 0x3e, 0x81, 0x8b, 0x29, 0x84, 0x2c, 0x65, 0x49, 0xac, 0x47, 0xd6, 0x54, 0xb6, 0x0e, + 0x61, 0x15, 0xfa, 0x2b, 0xe0, 0x2a, 0x84, 0x44, 0xdc, 0x80, 0x2c, 0x6c, 0x85, 0x9b, 0x20, 0x76, + 0x80, 0x2d, 0x8b, 0x2d, 0x73, 0x69, 0xd2, 0xa7, 0xe4, 0x48, 0x4b, 0x6d, 0x81, 0x8d, 0x41, 0x7f, + 0x26, 0xa7, 0x26, 0x8a, 0x05, 0xb6, 0x23, 0x4b, 0x60, 0x2a, 0xc5, 0x0d, 0x5b, 0x80, 0xf4, 0xbd, + 0x9e, 0x7b, 0xd6, 0xb9, 0xfc, 0xb0, 0x56, 0x93, 0x3b, 0x0a, 0x9d, 0x67, 0x78, 0xe0, 0x78, 0xf0, + 0x13, 0x79, 0xe7, 0xc0, 0x11, 0xec, 0xb7, 0x7e, 0x92, 0x40, 0x9e, 0x0b, 0x39, 0x1a, 0x96, 0xeb, + 0x7e, 0x87, 0x60, 0xaf, 0x46, 0x90, 0x48, 0x50, 0xa3, 0xa1, 0x7d, 0x88, 0xca, 0x0e, 0x58, 0x63, + 0xc3, 0xe2, 0xda, 0xc1, 0x8d, 0x68, 0x86, 0x44, 0xaf, 0x87, 0x53, 0xe2, 0x0d, 0x27, 0x51, 0x54, + 0xad, 0x2e, 0x6b, 0x61, 0xfa, 0xa3, 0x29, 0xc2, 0xae, 0x86, 0x8d, 0x81, 0x57, 0xf5, 0xd3, 0x54, + 0xbc, 0x42, 0x27, 0x0f, 0xb4, 0x93, 0xca, 0xbe, 0xfa, 0xf6, 0xaf, 0xdb, 0xae, 0xf3, 0xcf, 0x6d, + 0xd7, 0xf9, 0xf7, 0xb6, 0xeb, 0xfc, 0xf1, 0x7f, 0xf7, 0x8d, 0x5f, 0x3e, 0x59, 0x31, 0xb5, 0xde, + 0xce, 0xcf, 0x13, 0xb1, 0xb9, 0x58, 0xc7, 0xf9, 0x9a, 0x25, 0x42, 0x66, 0xf8, 0x75, 0xcb, 0xb7, + 0xe9, 0x45, 0xf3, 0x9b, 0x37, 0xf7, 0xb4, 0xfd, 0xf9, 0xeb, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, + 0x66, 0xca, 0x39, 0x0c, 0x07, 0x00, 0x00, } func (m *Config) Marshal() (dAtA []byte, err error) { @@ -757,6 +766,13 @@ func (m *Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Partition) > 0 { + i -= len(m.Partition) + copy(dAtA[i:], m.Partition) + i = encodeVarintConfig(dAtA, i, uint64(len(m.Partition))) + i-- + dAtA[i] = 0x4a + } if m.TLS != nil { { size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) @@ -1182,10 +1198,10 @@ func (m *ACLTokens) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.AgentMaster) > 0 { - i -= len(m.AgentMaster) - copy(dAtA[i:], m.AgentMaster) - i = 
encodeVarintConfig(dAtA, i, uint64(len(m.AgentMaster))) + if len(m.AgentRecovery) > 0 { + i -= len(m.AgentRecovery) + copy(dAtA[i:], m.AgentRecovery) + i = encodeVarintConfig(dAtA, i, uint64(len(m.AgentRecovery))) i-- dAtA[i] = 0x1a } @@ -1196,10 +1212,10 @@ func (m *ACLTokens) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.Master) > 0 { - i -= len(m.Master) - copy(dAtA[i:], m.Master) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Master))) + if len(m.InitialManagement) > 0 { + i -= len(m.InitialManagement) + copy(dAtA[i:], m.InitialManagement) + i = encodeVarintConfig(dAtA, i, uint64(len(m.InitialManagement))) i-- dAtA[i] = 0xa } @@ -1361,6 +1377,10 @@ func (m *Config) Size() (n int) { l = m.TLS.Size() n += 1 + l + sovConfig(uint64(l)) } + l = len(m.Partition) + if l > 0 { + n += 1 + l + sovConfig(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1498,7 +1518,7 @@ func (m *ACLTokens) Size() (n int) { } var l int _ = l - l = len(m.Master) + l = len(m.InitialManagement) if l > 0 { n += 1 + l + sovConfig(uint64(l)) } @@ -1506,7 +1526,7 @@ func (m *ACLTokens) Size() (n int) { if l > 0 { n += 1 + l + sovConfig(uint64(l)) } - l = len(m.AgentMaster) + l = len(m.AgentRecovery) if l > 0 { n += 1 + l + sovConfig(uint64(l)) } @@ -1887,6 +1907,38 @@ func (m *Config) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConfig + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConfig + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConfig + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Partition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipConfig(dAtA[iNdEx:]) @@ -2716,7 +2768,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Master", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitialManagement", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2744,7 +2796,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Master = string(dAtA[iNdEx:postIndex]) + m.InitialManagement = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2780,7 +2832,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentMaster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentRecovery", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2808,7 +2860,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AgentMaster = string(dAtA[iNdEx:postIndex]) + m.AgentRecovery = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { diff --git a/proto/pbconfig/config.proto b/proto/pbconfig/config.proto index 25b80f94e..8483c9626 100644 --- a/proto/pbconfig/config.proto +++ b/proto/pbconfig/config.proto @@ -9,6 +9,7 @@ message Config { string PrimaryDatacenter = 2; string NodeName = 3; string SegmentName = 4; + string Partition = 9; ACL ACL = 5; AutoEncrypt AutoEncrypt = 6; Gossip Gossip = 7; @@ -51,9 +52,9 @@ message ACL { } message ACLTokens { - string Master = 1; + string InitialManagement = 1; string Replication = 2; - string AgentMaster = 3; + string AgentRecovery = 3; string Default = 4; string Agent = 5; repeated ACLServiceProviderToken 
ManagedServiceProvider = 6; @@ -69,4 +70,4 @@ message AutoEncrypt { repeated string DNSSAN = 2; repeated string IPSAN = 3; bool AllowTLS = 4; -} \ No newline at end of file +} diff --git a/proto/pbservice/convert_oss.go b/proto/pbservice/convert_oss.go index f49a84ac7..215a2dc5f 100644 --- a/proto/pbservice/convert_oss.go +++ b/proto/pbservice/convert_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package pbservice diff --git a/proto/pbservice/convert_oss_test.go b/proto/pbservice/convert_oss_test.go index 7848e04d0..17717f058 100644 --- a/proto/pbservice/convert_oss_test.go +++ b/proto/pbservice/convert_oss_test.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package pbservice diff --git a/sdk/freeport/ephemeral_darwin.go b/sdk/freeport/ephemeral_darwin.go index 2449041c5..13d366af6 100644 --- a/sdk/freeport/ephemeral_darwin.go +++ b/sdk/freeport/ephemeral_darwin.go @@ -1,4 +1,5 @@ -//+build darwin +//go:build darwin +// +build darwin package freeport diff --git a/sdk/freeport/ephemeral_darwin_test.go b/sdk/freeport/ephemeral_darwin_test.go index e602f474a..10d0b9bba 100644 --- a/sdk/freeport/ephemeral_darwin_test.go +++ b/sdk/freeport/ephemeral_darwin_test.go @@ -1,4 +1,5 @@ -//+build darwin +//go:build darwin +// +build darwin package freeport diff --git a/sdk/freeport/ephemeral_fallback.go b/sdk/freeport/ephemeral_fallback.go index 4e3e2b386..d250ce64f 100644 --- a/sdk/freeport/ephemeral_fallback.go +++ b/sdk/freeport/ephemeral_fallback.go @@ -1,4 +1,5 @@ -//+build !linux,!darwin +//go:build !linux && !darwin +// +build !linux,!darwin package freeport diff --git a/sdk/freeport/ephemeral_linux.go b/sdk/freeport/ephemeral_linux.go index 88cc62a88..55200ad2a 100644 --- a/sdk/freeport/ephemeral_linux.go +++ b/sdk/freeport/ephemeral_linux.go @@ -1,4 +1,5 @@ -//+build linux +//go:build linux +// +build linux package freeport diff --git a/sdk/freeport/ephemeral_linux_test.go b/sdk/freeport/ephemeral_linux_test.go index 
2d9385df4..cd4e9db34 100644 --- a/sdk/freeport/ephemeral_linux_test.go +++ b/sdk/freeport/ephemeral_linux_test.go @@ -1,4 +1,5 @@ -//+build linux +//go:build linux +// +build linux package freeport diff --git a/sdk/freeport/freeport.go b/sdk/freeport/freeport.go index eab12a87f..e35d662ad 100644 --- a/sdk/freeport/freeport.go +++ b/sdk/freeport/freeport.go @@ -1,5 +1,16 @@ -// Package freeport provides a helper for allocating free ports across multiple -// processes on the same machine. +// Package freeport provides a helper for reserving free TCP ports across multiple +// processes on the same machine. Each process reserves a block of ports outside +// the ephemeral port range. Tests can request one of these reserved ports +// and freeport will ensure that no other test uses that port until it is returned +// to freeport. +// +// Freeport is particularly useful when the code being tested does not accept +// a net.Listener. Any code that accepts a net.Listener (or uses net/http/httptest.Server) +// can use port 0 (ex: 127.0.0.1:0) to find an unused ephemeral port that will +// not conflict. +// +// Any code that does not accept a net.Listener or can not bind directly to port +// zero should use freeport to find an unused port. package freeport import ( @@ -11,8 +22,6 @@ import ( "runtime" "sync" "time" - - "github.com/mitchellh/go-testing-interface" ) const ( @@ -251,6 +260,8 @@ func alloc() (int, net.Listener) { } // MustTake is the same as Take except it panics on error. +// +// Deprecated: Use GetN or GetOne instead. func MustTake(n int) (ports []int) { ports, err := Take(n) if err != nil { @@ -259,10 +270,12 @@ func MustTake(n int) (ports []int) { return ports } -// Take returns a list of free ports from the allocated port block. It is safe +// Take returns a list of free ports from the reserved port block. It is safe // to call this method concurrently. 
Ports have been tested to be available on // 127.0.0.1 TCP but there is no guarantee that they will remain free in the // future. +// +// Most callers should prefer GetN or GetOne. func Take(n int) (ports []int, err error) { if n <= 0 { return nil, fmt.Errorf("freeport: cannot take %d ports", n) @@ -381,11 +394,44 @@ func logf(severity string, format string, a ...interface{}) { fmt.Fprintf(os.Stderr, "["+severity+"] freeport: "+format+"\n", a...) } +// TestingT is the minimal set of methods implemented by *testing.T that are +// used by functions in freelist. +// +// In the future new methods may be added to this interface, but those methods +// should always be implemented by *testing.T +type TestingT interface { + Helper() + Fatalf(format string, args ...interface{}) + Cleanup(func()) +} + +// GetN returns n free ports from the reserved port block, and returns the +// ports to the pool when the test ends. See Take for more details. +func GetN(t TestingT, n int) []int { + t.Helper() + ports, err := Take(n) + if err != nil { + t.Fatalf("failed to take %v ports: %w", n, err) + } + t.Cleanup(func() { + Return(ports) + }) + return ports +} + +// GetOne returns a single free port from the reserved port block, and returns the +// port to the pool when the test ends. See Take for more details. +// Use GetN if more than a single port is required. +func GetOne(t TestingT) int { + t.Helper() + return GetN(t, 1)[0] +} + // Deprecated: Please use Take/Return calls instead. func Get(n int) (ports []int) { return MustTake(n) } // Deprecated: Please use Take/Return calls instead. -func GetT(t testing.T, n int) (ports []int) { return MustTake(n) } +func GetT(t TestingT, n int) (ports []int) { return MustTake(n) } // Deprecated: Please use Take/Return calls instead. 
func Free(n int) (ports []int, err error) { return MustTake(n), nil } diff --git a/sdk/freeport/systemlimit.go b/sdk/freeport/systemlimit.go index fdd902142..2db29870d 100644 --- a/sdk/freeport/systemlimit.go +++ b/sdk/freeport/systemlimit.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package freeport diff --git a/sdk/freeport/systemlimit_windows.go b/sdk/freeport/systemlimit_windows.go index eec211957..867c64af3 100644 --- a/sdk/freeport/systemlimit_windows.go +++ b/sdk/freeport/systemlimit_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package freeport diff --git a/sdk/iptables/iptables.go b/sdk/iptables/iptables.go index c80776833..83b5c03a1 100644 --- a/sdk/iptables/iptables.go +++ b/sdk/iptables/iptables.go @@ -18,12 +18,18 @@ const ( // ProxyOutputRedirectChain is the chain to redirect outbound traffic to the proxy ProxyOutputRedirectChain = "CONSUL_PROXY_REDIRECT" + // DNSChain is the chain to redirect outbound DNS traffic to Consul DNS. + DNSChain = "CONSUL_DNS_REDIRECT" + DefaultTProxyOutboundPort = 15001 ) // Config is used to configure which traffic interception and redirection // rules should be applied with the iptables commands. type Config struct { + // ConsulDNSIP is the IP for Consul DNS to direct DNS queries to. + ConsulDNSIP string + // ProxyUserID is the user ID of the proxy process. ProxyUserID string @@ -90,7 +96,7 @@ func Setup(cfg Config) error { } // Create chains we will use for redirection. - chains := []string{ProxyInboundChain, ProxyInboundRedirectChain, ProxyOutputChain, ProxyOutputRedirectChain} + chains := []string{ProxyInboundChain, ProxyInboundRedirectChain, ProxyOutputChain, ProxyOutputRedirectChain, DNSChain} for _, chain := range chains { cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-N", chain) } @@ -100,6 +106,17 @@ func Setup(cfg Config) error { // Redirects outbound TCP traffic hitting PROXY_REDIRECT chain to Envoy's outbound listener port. 
cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-A", ProxyOutputRedirectChain, "-p", "tcp", "-j", "REDIRECT", "--to-port", strconv.Itoa(cfg.ProxyOutboundPort)) + // The DNS rules are applied before the rules that directs all TCP traffic, so that the traffic going to port 53 goes through this rule first. + if cfg.ConsulDNSIP != "" { + // Traffic in the DNSChain is directed to the Consul DNS Service IP. + cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-A", DNSChain, "-p", "udp", "--dport", "53", "-j", "DNAT", "--to-destination", cfg.ConsulDNSIP) + cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-A", DNSChain, "-p", "tcp", "--dport", "53", "-j", "DNAT", "--to-destination", cfg.ConsulDNSIP) + + // For outbound TCP and UDP traffic going to port 53 (DNS), jump to the DNSChain. + cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-A", "OUTPUT", "-p", "udp", "--dport", "53", "-j", DNSChain) + cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-A", "OUTPUT", "-p", "tcp", "--dport", "53", "-j", DNSChain) + } + // For outbound TCP traffic jump from OUTPUT chain to PROXY_OUTPUT chain. 
cfg.IptablesProvider.AddRule("iptables", "-t", "nat", "-A", "OUTPUT", "-p", "tcp", "-j", ProxyOutputChain) diff --git a/sdk/iptables/iptables_executor_linux.go b/sdk/iptables/iptables_executor_linux.go index 224e088eb..5c25d5bea 100644 --- a/sdk/iptables/iptables_executor_linux.go +++ b/sdk/iptables/iptables_executor_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/sdk/iptables/iptables_executor_unsupported.go b/sdk/iptables/iptables_executor_unsupported.go index b26f39f67..3708328aa 100644 --- a/sdk/iptables/iptables_executor_unsupported.go +++ b/sdk/iptables/iptables_executor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package iptables diff --git a/sdk/iptables/iptables_test.go b/sdk/iptables/iptables_test.go index 1421de7db..1de3122e5 100644 --- a/sdk/iptables/iptables_test.go +++ b/sdk/iptables/iptables_test.go @@ -25,6 +25,7 @@ func TestSetup(t *testing.T) { "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", "iptables -t nat -N CONSUL_PROXY_OUTPUT", "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", "iptables -t nat -A CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 15001", "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", @@ -35,6 +36,34 @@ func TestSetup(t *testing.T) { "iptables -t nat -A CONSUL_PROXY_INBOUND -p tcp -j CONSUL_PROXY_IN_REDIRECT", }, }, + { + "Consul DNS IP provided", + Config{ + ProxyUserID: "123", + ProxyInboundPort: 20000, + ConsulDNSIP: "10.0.34.16", + IptablesProvider: &fakeIptablesProvider{}, + }, + []string{ + "iptables -t nat -N CONSUL_PROXY_INBOUND", + "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", + "iptables -t nat -N CONSUL_PROXY_OUTPUT", + "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", + "iptables -t nat -A CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 15001", + "iptables -t nat -A CONSUL_DNS_REDIRECT 
-p udp --dport 53 -j DNAT --to-destination 10.0.34.16", + "iptables -t nat -A CONSUL_DNS_REDIRECT -p tcp --dport 53 -j DNAT --to-destination 10.0.34.16", + "iptables -t nat -A OUTPUT -p udp --dport 53 -j CONSUL_DNS_REDIRECT", + "iptables -t nat -A OUTPUT -p tcp --dport 53 -j CONSUL_DNS_REDIRECT", + "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", + "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", + "iptables -t nat -A CONSUL_PROXY_OUTPUT -d 127.0.0.1/32 -j RETURN", + "iptables -t nat -A CONSUL_PROXY_OUTPUT -j CONSUL_PROXY_REDIRECT", + "iptables -t nat -A CONSUL_PROXY_IN_REDIRECT -p tcp -j REDIRECT --to-port 20000", + "iptables -t nat -A PREROUTING -p tcp -j CONSUL_PROXY_INBOUND", + "iptables -t nat -A CONSUL_PROXY_INBOUND -p tcp -j CONSUL_PROXY_IN_REDIRECT", + }, + }, { "proxy outbound port is provided", Config{ @@ -48,6 +77,7 @@ func TestSetup(t *testing.T) { "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", "iptables -t nat -N CONSUL_PROXY_OUTPUT", "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", "iptables -t nat -A CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 21000", "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", @@ -72,6 +102,7 @@ func TestSetup(t *testing.T) { "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", "iptables -t nat -N CONSUL_PROXY_OUTPUT", "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", "iptables -t nat -A CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 21000", "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", @@ -98,6 +129,7 @@ func TestSetup(t *testing.T) { "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", "iptables -t nat -N CONSUL_PROXY_OUTPUT", "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", "iptables -t nat -A 
CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 21000", "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", @@ -124,6 +156,7 @@ func TestSetup(t *testing.T) { "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", "iptables -t nat -N CONSUL_PROXY_OUTPUT", "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", "iptables -t nat -A CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 21000", "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", @@ -150,6 +183,7 @@ func TestSetup(t *testing.T) { "iptables -t nat -N CONSUL_PROXY_IN_REDIRECT", "iptables -t nat -N CONSUL_PROXY_OUTPUT", "iptables -t nat -N CONSUL_PROXY_REDIRECT", + "iptables -t nat -N CONSUL_DNS_REDIRECT", "iptables -t nat -A CONSUL_PROXY_REDIRECT -p tcp -j REDIRECT --to-port 21000", "iptables -t nat -A OUTPUT -p tcp -j CONSUL_PROXY_OUTPUT", "iptables -t nat -A CONSUL_PROXY_OUTPUT -m owner --uid-owner 123 -j RETURN", @@ -171,7 +205,6 @@ func TestSetup(t *testing.T) { require.Equal(t, c.expectedRules, c.cfg.IptablesProvider.Rules()) }) } - } func TestSetup_errors(t *testing.T) { diff --git a/sdk/testutil/server.go b/sdk/testutil/server.go index 1ecde56a1..cd894a2ef 100644 --- a/sdk/testutil/server.go +++ b/sdk/testutil/server.go @@ -29,11 +29,12 @@ import ( "testing" "time" - "github.com/hashicorp/consul/sdk/freeport" - "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-uuid" "github.com/pkg/errors" + + "github.com/hashicorp/consul/sdk/freeport" + "github.com/hashicorp/consul/sdk/testutil/retry" ) // TestPerformanceConfig configures the performance parameters. 
@@ -142,7 +143,11 @@ func defaultServerConfig(t TestingTB) *TestServerConfig { panic(err) } - ports := freeport.MustTake(6) + ports, err := freeport.Take(6) + if err != nil { + t.Fatalf("failed to take ports: %v", err) + } + logBuffer := NewLogBuffer(t) return &TestServerConfig{ diff --git a/sdk/testutil/types.go b/sdk/testutil/types.go index ec04e45dc..d6b5841c5 100644 --- a/sdk/testutil/types.go +++ b/sdk/testutil/types.go @@ -8,4 +8,5 @@ type TestingTB interface { Failed() bool Logf(format string, args ...interface{}) Name() string + Fatalf(fmt string, args ...interface{}) } diff --git a/sentinel/sentinel_oss.go b/sentinel/sentinel_oss.go index ce7671b2b..d4323b3ab 100644 --- a/sentinel/sentinel_oss.go +++ b/sentinel/sentinel_oss.go @@ -1,3 +1,4 @@ +//go:build !consulent // +build !consulent package sentinel diff --git a/service_os/service_windows.go b/service_os/service_windows.go index d7fc245f5..80d2e9165 100644 --- a/service_os/service_windows.go +++ b/service_os/service_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package service_os diff --git a/test/integration/connect/envoy/case-wanfed-gw/primary/verify.bats b/test/integration/connect/envoy/case-wanfed-gw/primary/verify.bats index 3c9d89998..5be23d04c 100644 --- a/test/integration/connect/envoy/case-wanfed-gw/primary/verify.bats +++ b/test/integration/connect/envoy/case-wanfed-gw/primary/verify.bats @@ -23,7 +23,7 @@ load helpers } @test "primary should be able to rpc to the secondary" { - retry_default curl -sL -f -XPUT localhost:8500/v1/kv/foo?dc=secondary -d'{"Value":"bar"}' + retry_long curl -sL -f -XPUT localhost:8500/v1/kv/foo?dc=secondary -d'{"Value":"bar"}' } @test "wan pool should show 2 healthy nodes" { diff --git a/test/integration/connect/envoy/defaults.sh b/test/integration/connect/envoy/defaults.sh index 2961ff8ae..ccf85824d 100644 --- a/test/integration/connect/envoy/defaults.sh +++ b/test/integration/connect/envoy/defaults.sh @@ -2,4 +2,5 @@ export 
DEFAULT_REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy" export REQUIRED_SERVICES="${DEFAULT_REQUIRED_SERVICES}" -export REQUIRE_SECONDARY=0 \ No newline at end of file +export REQUIRE_SECONDARY=0 +export REQUIRE_PARTITIONS=0 \ No newline at end of file diff --git a/test/integration/connect/envoy/helpers.bash b/test/integration/connect/envoy/helpers.bash index dcf184a6a..0c5f60681 100755 --- a/test/integration/connect/envoy/helpers.bash +++ b/test/integration/connect/envoy/helpers.bash @@ -115,14 +115,19 @@ function assert_proxy_presents_cert_uri { local SERVICENAME=$2 local DC=${3:-primary} local NS=${4:-default} + local PARTITION=${5:default} CERT=$(retry_default get_cert $HOSTPORT) - echo "WANT SERVICE: ${NS}/${SERVICENAME}" + echo "WANT SERVICE: ${PARTITION}/${NS}/${SERVICENAME}" echo "GOT CERT:" echo "$CERT" - echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/${NS}/dc/${DC}/svc/$SERVICENAME" + if [[ -z $PARTITION ]] || [[ $PARTITION = "default" ]]; then + echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/${NS}/dc/${DC}/svc/$SERVICENAME" + else + echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ap/${PARTITION}/ns/${NS}/dc/${DC}/svc/$SERVICENAME" + fi } function assert_dnssan_in_cert { diff --git a/test/integration/connect/envoy/main_test.go b/test/integration/connect/envoy/main_test.go index 25f955829..6b60efab5 100644 --- a/test/integration/connect/envoy/main_test.go +++ b/test/integration/connect/envoy/main_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package envoy diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index ea2f8429b..b5978ad29 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -40,39 +40,39 @@ function network_snippet { } function init_workdir { - local DC="$1" + local CLUSTER="$1" - if test -z "$DC" + if test -z "$CLUSTER" then - DC=primary + CLUSTER=primary 
fi # Note, we use explicit set of dirs so we don't delete .gitignore. Also, # don't wipe logs between runs as they are already split and we need them to # upload as artifacts later. - rm -rf workdir/${DC} - mkdir -p workdir/${DC}/{consul,register,envoy,bats,statsd,data} + rm -rf workdir/${CLUSTER} + mkdir -p workdir/${CLUSTER}/{consul,register,envoy,bats,statsd,data} # Reload consul config from defaults - cp consul-base-cfg/*.hcl workdir/${DC}/consul/ + cp consul-base-cfg/*.hcl workdir/${CLUSTER}/consul/ # Add any overrides if there are any (no op if not) - find ${CASE_DIR} -maxdepth 1 -name '*.hcl' -type f -exec cp -f {} workdir/${DC}/consul \; + find ${CASE_DIR} -maxdepth 1 -name '*.hcl' -type f -exec cp -f {} workdir/${CLUSTER}/consul \; # Copy all the test files - find ${CASE_DIR} -maxdepth 1 -name '*.bats' -type f -exec cp -f {} workdir/${DC}/bats \; - # Copy DC specific bats - cp helpers.bash workdir/${DC}/bats + find ${CASE_DIR} -maxdepth 1 -name '*.bats' -type f -exec cp -f {} workdir/${CLUSTER}/bats \; + # Copy CLUSTER specific bats + cp helpers.bash workdir/${CLUSTER}/bats - # Add any DC overrides - if test -d "${CASE_DIR}/${DC}" + # Add any CLUSTER overrides + if test -d "${CASE_DIR}/${CLUSTER}" then - find ${CASE_DIR}/${DC} -type f -name '*.hcl' -exec cp -f {} workdir/${DC}/consul \; - find ${CASE_DIR}/${DC} -type f -name '*.bats' -exec cp -f {} workdir/${DC}/bats \; + find ${CASE_DIR}/${CLUSTER} -type f -name '*.hcl' -exec cp -f {} workdir/${CLUSTER}/consul \; + find ${CASE_DIR}/${CLUSTER} -type f -name '*.bats' -exec cp -f {} workdir/${CLUSTER}/bats \; fi # move all of the registration files OUT of the consul config dir now - find workdir/${DC}/consul -type f -name 'service_*.hcl' -exec mv -f {} workdir/${DC}/register \; + find workdir/${CLUSTER}/consul -type f -name 'service_*.hcl' -exec mv -f {} workdir/${CLUSTER}/register \; # copy the ca-certs for SDS so we can verify the right ones are served mkdir -p workdir/test-sds-server/certs @@ -80,7 +80,7 
@@ function init_workdir { if test -d "${CASE_DIR}/data" then - cp -r ${CASE_DIR}/data/* workdir/${DC}/data + cp -r ${CASE_DIR}/data/* workdir/${CLUSTER}/data fi return 0 @@ -157,13 +157,48 @@ function start_consul { -client "0.0.0.0" >/dev/null } +function start_partitioned_client { + local PARTITION=${1:-ap1} + + # Start consul now as setup script needs it up + docker_kill_rm consul-${PARTITION} + + license="${CONSUL_LICENSE:-}" + # load the consul license so we can pass it into the consul + # containers as an env var in the case that this is a consul + # enterprise test + if test -z "$license" -a -n "${CONSUL_LICENSE_PATH:-}" + then + license=$(cat $CONSUL_LICENSE_PATH) + fi + + sh -c "rm -rf /workdir/${PARTITION}/data" + + # Run consul and expose some ports to the host to make debugging locally a + # bit easier. + # + docker run -d --name envoy_consul-${PARTITION}_1 \ + --net=envoy-tests \ + $WORKDIR_SNIPPET \ + --hostname "consul-${PARTITION}" \ + --network-alias "consul-${PARTITION}" \ + -e "CONSUL_LICENSE=$license" \ + consul-dev agent \ + -datacenter "primary" \ + -retry-join "consul-primary" \ + -grpc-port 8502 \ + -data-dir "/tmp/consul" \ + -config-dir "/workdir/${PARTITION}/consul" \ + -client "0.0.0.0" >/dev/null +} + function pre_service_setup { - local DC=${1:-primary} + local CLUSTER=${1:-primary} # Run test case setup (e.g. generating Envoy bootstrap, starting containers) - if [ -f "${CASE_DIR}/${DC}/setup.sh" ] + if [ -f "${CASE_DIR}/${CLUSTER}/setup.sh" ] then - source ${CASE_DIR}/${DC}/setup.sh + source ${CASE_DIR}/${CLUSTER}/setup.sh else source ${CASE_DIR}/setup.sh fi @@ -184,29 +219,29 @@ function start_services { } function verify { - local DC=$1 - if test -z "$DC"; then - DC=primary + local CLUSTER="$1" + if test -z "$CLUSTER"; then + CLUSTER="primary" fi # Execute tests res=0 # Nuke any previous case's verify container. 
- docker_kill_rm verify-${DC} + docker_kill_rm verify-${CLUSTER} - echo "Running ${DC} verification step for ${CASE_DIR}..." + echo "Running ${CLUSTER} verification step for ${CASE_DIR}..." # need to tell the PID 1 inside of the container that it won't be actual PID # 1 because we're using --pid=host so we use TINI_SUBREAPER - if docker run --name envoy_verify-${DC}_1 -t \ + if docker run --name envoy_verify-${CLUSTER}_1 -t \ -e TINI_SUBREAPER=1 \ -e ENVOY_VERSION \ $WORKDIR_SNIPPET \ --pid=host \ - $(network_snippet $DC) \ + $(network_snippet $CLUSTER) \ bats-verify \ - --pretty /workdir/${DC}/bats ; then + --pretty /workdir/${CLUSTER}/bats ; then echogreen "✓ PASS" else echored "⨯ FAIL" @@ -228,6 +263,11 @@ function capture_logs { then services="$services consul-secondary" fi + if is_set $REQUIRE_PARTITIONS + then + services="$services consul-ap1" + fi + if [ -f "${CASE_DIR}/capture.sh" ] then @@ -247,7 +287,7 @@ function stop_services { # Teardown docker_kill_rm $REQUIRED_SERVICES - docker_kill_rm consul-primary consul-secondary + docker_kill_rm consul-primary consul-secondary consul-ap1 } function init_vars { @@ -286,6 +326,10 @@ function run_tests { then init_workdir secondary fi + if is_set $REQUIRE_PARTITIONS + then + init_workdir ap1 + fi global_setup @@ -307,6 +351,10 @@ function run_tests { if is_set $REQUIRE_SECONDARY; then start_consul secondary fi + if is_set $REQUIRE_PARTITIONS; then + docker_consul "primary" consul partition create -name ap1 > /dev/null + start_partitioned_client ap1 + fi echo "Setting up the primary datacenter" pre_service_setup primary @@ -315,14 +363,20 @@ function run_tests { echo "Setting up the secondary datacenter" pre_service_setup secondary fi + if is_set $REQUIRE_PARTITIONS; then + echo "Setting up the non-default partition" + pre_service_setup ap1 + fi echo "Starting services" start_services # Run the verify container and report on the output + echo "Verifying the primary datacenter" verify primary if is_set 
$REQUIRE_SECONDARY; then + echo "Verifying the secondary datacenter" verify secondary fi } @@ -378,7 +432,7 @@ function suite_teardown { docker_kill_rm $(grep "^function run_container_" $self_name | \ sed 's/^function run_container_\(.*\) {/\1/g') - docker_kill_rm consul-primary consul-secondary + docker_kill_rm consul-primary consul-secondary consul-ap1 if docker network inspect envoy-tests &>/dev/null ; then echo -n "Deleting network 'envoy-tests'..." @@ -402,13 +456,13 @@ function run_container { function common_run_container_service { local service="$1" - local DC="$2" + local CLUSTER="$2" local httpPort="$3" local grpcPort="$4" docker run -d --name $(container_name_prev) \ -e "FORTIO_NAME=${service}" \ - $(network_snippet $DC) \ + $(network_snippet $CLUSTER) \ "${HASHICORP_DOCKER_PROXY}/fortio/fortio" \ server \ -http-port ":$httpPort" \ @@ -420,6 +474,10 @@ function run_container_s1 { common_run_container_service s1 primary 8080 8079 } +function run_container_s1-ap1 { + common_run_container_service s1 ap1 8080 8079 +} + function run_container_s2 { common_run_container_service s2 primary 8181 8179 } @@ -455,9 +513,17 @@ function run_container_s2-secondary { common_run_container_service s2-secondary secondary 8181 8179 } +function run_container_s2-ap1 { + common_run_container_service s2 ap1 8480 8479 +} + +function run_container_s3-ap1 { + common_run_container_service s3 ap1 8580 8579 +} + function common_run_container_sidecar_proxy { local service="$1" - local DC="$2" + local CLUSTER="$2" # Hot restart breaks since both envoys seem to interact with each other # despite separate containers that don't share IPC namespace. Not quite @@ -465,10 +531,10 @@ function common_run_container_sidecar_proxy { # location? 
docker run -d --name $(container_name_prev) \ $WORKDIR_SNIPPET \ - $(network_snippet $DC) \ + $(network_snippet $CLUSTER) \ "${HASHICORP_DOCKER_PROXY}/envoyproxy/envoy:v${ENVOY_VERSION}" \ envoy \ - -c /workdir/${DC}/envoy/${service}-bootstrap.json \ + -c /workdir/${CLUSTER}/envoy/${service}-bootstrap.json \ -l debug \ --disable-hot-restart \ --drain-time-s 1 >/dev/null @@ -477,6 +543,11 @@ function common_run_container_sidecar_proxy { function run_container_s1-sidecar-proxy { common_run_container_sidecar_proxy s1 primary } + +function run_container_s1-ap1-sidecar-proxy { + common_run_container_sidecar_proxy s1 ap1 +} + function run_container_s1-sidecar-proxy-consul-exec { docker run -d --name $(container_name) \ $(network_snippet primary) \ @@ -518,6 +589,14 @@ function run_container_s2-sidecar-proxy-secondary { common_run_container_sidecar_proxy s2 secondary } +function run_container_s2-ap1-sidecar-proxy { + common_run_container_sidecar_proxy s2 ap1 +} + +function run_container_s3-ap1-sidecar-proxy { + common_run_container_sidecar_proxy s3 ap1 +} + function common_run_container_gateway { local name="$1" local DC="$2" diff --git a/types/tls.go b/types/tls.go new file mode 100644 index 000000000..66c10b19b --- /dev/null +++ b/types/tls.go @@ -0,0 +1,185 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// TLSVersion is a strongly-typed int used for relative comparison +// (minimum, maximum, greater than, less than) of TLS versions +type TLSVersion int + +const ( + // Error value, excluded from lookup maps + TLSVersionInvalid TLSVersion = iota - 1 + + // Explicit unspecified zero-value to avoid overwriting parent defaults + TLSVersionUnspecified + + // Explictly allow implementation to select TLS version + // May be useful to supercede defaults specified at a higher layer + TLSVersionAuto + + _ // Placeholder for SSLv3, hopefully we won't have to add this + + // TLS versions + TLSv1_0 + TLSv1_1 + TLSv1_2 + TLSv1_3 +) + +var ( + TLSVersions = 
map[string]TLSVersion{ + "TLS_AUTO": TLSVersionAuto, + "TLSv1_0": TLSv1_0, + "TLSv1_1": TLSv1_1, + "TLSv1_2": TLSv1_2, + "TLSv1_3": TLSv1_3, + } + // NOTE: This interface is deprecated in favor of TLSVersions + // and should be eventually removed in a future release. + DeprecatedConsulAgentTLSVersions = map[string]TLSVersion{ + "": TLSVersionAuto, + "tls10": TLSv1_0, + "tls11": TLSv1_1, + "tls12": TLSv1_2, + "tls13": TLSv1_3, + } + HumanTLSVersionStrings = map[TLSVersion]string{ + TLSVersionAuto: "Allow implementation to select TLS version", + TLSv1_0: "TLS 1.0", + TLSv1_1: "TLS 1.1", + TLSv1_2: "TLS 1.2", + TLSv1_3: "TLS 1.3", + } + ConsulConfigTLSVersionStrings = func() map[TLSVersion]string { + inverted := make(map[TLSVersion]string, len(TLSVersions)) + for k, v := range TLSVersions { + inverted[v] = k + } + return inverted + }() + // NOTE: these currently map to the deprecated config strings to support the + // deployment pattern of upgrading servers first. This map should eventually + // be removed and any lookups updated to use ConsulConfigTLSVersionStrings + // with newer config strings instead in a future release. 
+ ConsulAutoConfigTLSVersionStrings = map[TLSVersion]string{ + TLSVersionAuto: "", + TLSv1_0: "tls10", + TLSv1_1: "tls11", + TLSv1_2: "tls12", + TLSv1_3: "tls13", + } +) + +func (v TLSVersion) String() string { + return ConsulConfigTLSVersionStrings[v] +} + +func (v TLSVersion) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *TLSVersion) UnmarshalJSON(bytes []byte) error { + versionStr := string(bytes) + + if n := len(versionStr); n > 1 && versionStr[0] == '"' && versionStr[n-1] == '"' { + versionStr = versionStr[1 : n-1] // trim surrounding quotes + } + + if version, ok := TLSVersions[versionStr]; ok { + *v = version + return nil + } + + *v = TLSVersionInvalid + return fmt.Errorf("no matching TLS Version found for %s", versionStr) +} + +// IANA cipher suite constants and values as defined at +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml +// This is the total list of TLS 1.2-style cipher suites +// which are currently supported by either Envoy 1.21 or the Consul agent +// via Go, and may change as some older suites are removed in future +// Envoy releases and Consul drops support for older Envoy versions, +// and as supported cipher suites in the Go runtime change. 
+// +// The naming convention for cipher suites changed in TLS 1.3 +// but constant values should still be globally unqiue +// Handling validation on a subset of TLSCipherSuite constants +// would be a future exercise if cipher suites for TLS 1.3 ever +// become configurable in BoringSSL, Envoy, or other implementation +type TLSCipherSuite uint16 + +const ( + // Envoy cipher suites also used by Consul agent + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLSCipherSuite = 0xc02b + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xcca9 // Not used by Consul agent yet + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xc02f + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xcca8 // Not used by Consul agent yet + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xc009 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xc013 + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xc02c + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xc030 + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xc00a + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xc014 + + // Older cipher suites not supported for Consul agent TLS, will eventually be removed from Envoy defaults + TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009c + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002f + TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009d + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + + // Additional cipher suites used by Consul agent but not Envoy + // TODO: these are both explicitly listed as insecure and disabled in the Go source, should they be removed? 
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/crypto/tls/cipher_suites.go;l=329-330 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0x0023 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xc027 +) + +var ( + TLSCipherSuites = map[string]TLSCipherSuite{ + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + + "TLS_RSA_WITH_AES_128_GCM_SHA256": TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_128_CBC_SHA": TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_GCM_SHA384": TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_RSA_WITH_AES_256_CBC_SHA": TLS_RSA_WITH_AES_256_CBC_SHA, + + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + } + HumanTLSCipherSuiteStrings = func() map[TLSCipherSuite]string { + inverted := make(map[TLSCipherSuite]string, len(TLSCipherSuites)) + for k, v := range TLSCipherSuites { + inverted[v] = k + } + return inverted + }() + EnvoyTLSCipherSuiteStrings = map[TLSCipherSuite]string{ + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "ECDHE-ECDSA-AES128-GCM-SHA256", + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: "ECDHE-ECDSA-CHACHA20-POLY1305", + 
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "ECDHE-RSA-AES128-GCM-SHA256", + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: "ECDHE-RSA-CHACHA20-POLY1305", + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "ECDHE-ECDSA-AES128-SHA", + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "ECDHE-RSA-AES128-SHA", + TLS_RSA_WITH_AES_128_GCM_SHA256: "AES128-GCM-SHA256", + TLS_RSA_WITH_AES_128_CBC_SHA: "AES128-SHA", + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "ECDHE-ECDSA-AES256-GCM-SHA384", + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "ECDHE-RSA-AES256-GCM-SHA384", + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "ECDHE-ECDSA-AES256-SHA", + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "ECDHE-RSA-AES256-SHA", + TLS_RSA_WITH_AES_256_GCM_SHA384: "AES256-GCM-SHA384", + TLS_RSA_WITH_AES_256_CBC_SHA: "AES256-SHA", + } +) diff --git a/types/tls_test.go b/types/tls_test.go new file mode 100644 index 000000000..0cf94e42f --- /dev/null +++ b/types/tls_test.go @@ -0,0 +1,49 @@ +package types + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTLSVersion_PartialEq(t *testing.T) { + require.Greater(t, TLSv1_3, TLSv1_2) + require.Greater(t, TLSv1_2, TLSv1_1) + require.Greater(t, TLSv1_1, TLSv1_0) + + require.Less(t, TLSv1_2, TLSv1_3) + require.Less(t, TLSv1_1, TLSv1_2) + require.Less(t, TLSv1_0, TLSv1_1) +} + +func TestTLSVersion_Invalid(t *testing.T) { + var zeroValue TLSVersion + require.NotEqual(t, TLSVersionInvalid, zeroValue) + require.NotEqual(t, TLSVersionInvalid, TLSVersionUnspecified) + require.NotEqual(t, TLSVersionInvalid, TLSVersionAuto) +} + +func TestTLSVersion_Zero(t *testing.T) { + var zeroValue TLSVersion + require.Equal(t, TLSVersionUnspecified, zeroValue) + require.NotEqual(t, TLSVersionUnspecified, TLSVersionInvalid) + require.NotEqual(t, TLSVersionUnspecified, TLSVersionAuto) +} + +func TestTLSVersion_ToJSON(t *testing.T) { + var tlsVersion TLSVersion + err := tlsVersion.UnmarshalJSON([]byte(`"foo"`)) + require.Error(t, err) + require.Equal(t, tlsVersion, TLSVersionInvalid) 
+ + for str, version := range TLSVersions { + versionJSON, err := json.Marshal(version) + require.NoError(t, err) + require.Equal(t, versionJSON, []byte(`"`+str+`"`)) + + err = tlsVersion.UnmarshalJSON([]byte(`"` + str + `"`)) + require.NoError(t, err) + require.Equal(t, tlsVersion, version) + } +} diff --git a/ui/package.json b/ui/package.json index 06b57506c..542087e1d 100644 --- a/ui/package.json +++ b/ui/package.json @@ -11,7 +11,7 @@ "scripts": { "doc:toc": "doctoc README.md", "compliance": "npm-run-all compliance:*", - "compliance:licenses": "license-checker --summary --onlyAllow 'Python-2.0;Apache*;Apache License, Version 2.0;Apache-2.0;Apache 2.0;Artistic-2.0;BSD;BSD-3-Clause;CC-BY-3.0;CC-BY-4.0;CC0-1.0;ISC;MIT;MPL-2.0;Public Domain;Unicode-TOU;Unlicense;WTFPL' --excludePackages 'consul-ui@2.2.0;consul-acls@0.1.0;consul-partitions@0.1.0'" + "compliance:licenses": "license-checker --summary --onlyAllow 'Python-2.0;Apache*;Apache License, Version 2.0;Apache-2.0;Apache 2.0;Artistic-2.0;BSD;BSD-3-Clause;CC-BY-3.0;CC-BY-4.0;CC0-1.0;ISC;MIT;MPL-2.0;Public Domain;Unicode-TOU;Unlicense;WTFPL' --excludePackages 'consul-ui@2.2.0;consul-acls@0.1.0;consul-partitions@0.1.0;consul-nspaces@0.1.0'" }, "devDependencies": { diff --git a/ui/packages/consul-nspaces/app/components/consul/nspace/form/README.mdx b/ui/packages/consul-nspaces/app/components/consul/nspace/form/README.mdx new file mode 100644 index 000000000..d9e2eb2d2 --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/form/README.mdx @@ -0,0 +1,24 @@ +# Consul::Nspace::Form + +```hbs preview-template + + + + + +``` diff --git a/ui/packages/consul-nspaces/app/components/consul/nspace/form/index.hbs b/ui/packages/consul-nspaces/app/components/consul/nspace/form/index.hbs new file mode 100644 index 000000000..ec2a3d6b4 --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/form/index.hbs @@ -0,0 +1,169 @@ +
+ + + + + + + +{{#let + + (not (can "write nspaces")) + + @item + + (hash + help='Must be a valid DNS hostname. Must contain 1-64 characters (numbers, letters, and hyphens), and must begin with a letter. Once created, this cannot be changed.' + Name=(array + (hash + test='^[a-zA-Z0-9]([a-zA-Z0-9-]{0,62}[a-zA-Z0-9])?$' + error='Name must be a valid DNS hostname.' + ) + ) + ) + + (hash + Description=(array) + ) + +as |readOnly item Name Description|}} +
+ + + +
+{{#if (is "new nspace" item=item)}} + +{{/if}} + +
+{{#if (can 'use acls')}} +
+

Roles

+

+{{#if (can "write nspace" item=item)}} + By adding roles to this namespaces, you will apply them to all tokens created within this namespace. +{{else}} + The following roles are applied to all tokens created within this namespace. +{{/if}} +

+ +
+
+

Policies

+

+{{#if (not readOnly)}} + By adding policies to this namespace, you will apply them to all tokens created within this namespace. +{{else}} + The following policies are applied to all tokens created within this namespace. +{{/if}} +

+ +
+{{/if}} +
+{{#if (and (is "new nspace" item=item) (can "create nspaces"))}} + + Save + +{{else if (can "write nspace" item=item)}} + Save +{{/if}} + + + Cancel + + +{{#if (and (not (is "new nspace" item=item)) (can "delete nspace" item=item))}} + + + + Delete + + + + + + +{{/if}} + +
+
+
+{{/let}} +
+
+
+ diff --git a/ui/packages/consul-ui/app/components/consul/nspace/list/README.mdx b/ui/packages/consul-nspaces/app/components/consul/nspace/list/README.mdx similarity index 60% rename from ui/packages/consul-ui/app/components/consul/nspace/list/README.mdx rename to ui/packages/consul-nspaces/app/components/consul/nspace/list/README.mdx index 7be2debf0..7e5ee50cd 100644 --- a/ui/packages/consul-ui/app/components/consul/nspace/list/README.mdx +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/list/README.mdx @@ -1,29 +1,30 @@ ---- -class: ember ---- -## Consul::Nspace::List +# Consul::Nspace::List -```hbs - +A presentational component for rendering Consul Namespaces + +Please note: + +- For the moment, make sure you have enabled nspaces using developer debug + cookies. + +```hbs preview-template + ``` -A presentational component for rendering Consul Namespaces - -### Arguments +## Arguments | Argument/Attribute | Type | Default | Description | | --- | --- | --- | --- | | `items` | `array` | | An array of Namespaces | | `ondelete` | `function` | | An action to execute when the `Delete` action is clicked | -### See +## See -- [Component Source Code](./index.js) - [Template Source Code](./index.hbs) --- diff --git a/ui/packages/consul-ui/app/components/consul/nspace/list/index.hbs b/ui/packages/consul-nspaces/app/components/consul/nspace/list/index.hbs similarity index 100% rename from ui/packages/consul-ui/app/components/consul/nspace/list/index.hbs rename to ui/packages/consul-nspaces/app/components/consul/nspace/list/index.hbs diff --git a/ui/packages/consul-ui/app/components/consul/nspace/list/pageobject.js b/ui/packages/consul-nspaces/app/components/consul/nspace/list/pageobject.js similarity index 100% rename from ui/packages/consul-ui/app/components/consul/nspace/list/pageobject.js rename to ui/packages/consul-nspaces/app/components/consul/nspace/list/pageobject.js diff --git 
a/ui/packages/consul-nspaces/app/components/consul/nspace/notifications/README.mdx b/ui/packages/consul-nspaces/app/components/consul/nspace/notifications/README.mdx new file mode 100644 index 000000000..fb2f834dd --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/notifications/README.mdx @@ -0,0 +1,19 @@ +# Consul::Nspace::Notifications + +A Notification component specifically for namespaces. This is only a component as we currently use this in two places and if we need to add more types we can do so in one place. + +We currently only have one 'remove' type due to the fact that namespaces can't use the default 'delete' notification as they get 'marked for deletion' instead. + +```hbs preview-template + +``` + + + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-nspaces/app/components/consul/nspace/notifications/index.hbs b/ui/packages/consul-nspaces/app/components/consul/nspace/notifications/index.hbs new file mode 100644 index 000000000..c373a535b --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/notifications/index.hbs @@ -0,0 +1,16 @@ +{{#if (eq @type 'remove')}} + + + Success! + + +

+ Your Namespace has been marked for deletion. +

+
+
+{{/if}} diff --git a/ui/packages/consul-nspaces/app/components/consul/nspace/search-bar/README.mdx b/ui/packages/consul-nspaces/app/components/consul/nspace/search-bar/README.mdx new file mode 100644 index 000000000..0bcbd29fd --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/search-bar/README.mdx @@ -0,0 +1,30 @@ +# Consul::Nspace::SearchBar + +Searchbar tailored for searching namespaces. Follows our more generic +'*::SearchBar' component interface. + +```hbs preview-template + +``` + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/nspace/search-bar/index.hbs b/ui/packages/consul-nspaces/app/components/consul/nspace/search-bar/index.hbs similarity index 100% rename from ui/packages/consul-ui/app/components/consul/nspace/search-bar/index.hbs rename to ui/packages/consul-nspaces/app/components/consul/nspace/search-bar/index.hbs diff --git a/ui/packages/consul-nspaces/app/components/consul/nspace/selector/README.mdx b/ui/packages/consul-nspaces/app/components/consul/nspace/selector/README.mdx new file mode 100644 index 000000000..afd5d8fb6 --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/selector/README.mdx @@ -0,0 +1,40 @@ +# Consul::Nspace::Selector + +A conditional, autoloading, menu component specifically for making it easy to select namespaces. + +Please note: + +- Currently at least, you must add this inside of a `
    ` element, as the `
  • ` elements output by this component are intended to be mixed with other sibling `
  • `s from other components or template code. +- For the moment, make sure you have enabled nspaces using developer debug + cookies. + +```hbs preview-template +
      + +
    +``` + + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `dc` | `object` | | The current datacenter | +| `nspace` | `string` | | The name of the current namespace | +| `partition` | `string` | | The name of the current partition | +| `nspaces` | `array` | | A list of nspace models/objects to use for the selector | +| `onchange` | `function` | | An event handler, for when nspaces are loaded. You probably want to update `@nspaces` using this. | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-nspaces/app/components/consul/nspace/selector/index.hbs b/ui/packages/consul-nspaces/app/components/consul/nspace/selector/index.hbs new file mode 100644 index 000000000..c3296dff2 --- /dev/null +++ b/ui/packages/consul-nspaces/app/components/consul/nspace/selector/index.hbs @@ -0,0 +1,74 @@ +{{#if (can "use nspaces")}} + {{#if (can "choose nspaces")}} +{{#let + (or @nspace 'default') +as |nspace|}} +
  • + + + {{nspace}} + + + {{#let components.MenuItem components.MenuSeparator as |MenuItem MenuSeparator|}} + {{#if (gt @nspaces.length 0)}} + + {{else}} + + {{/if}} + {{#each (reject-by 'DeletedAt' @nspaces) as |item|}} + + + {{item.Name}} + + + {{/each}} + {{#if (can 'manage nspaces')}} + + + + Manage Namespaces + + + {{/if}} + {{/let}} + + +
  • +{{/let}} + {{/if}} + {{/if}} + diff --git a/ui/packages/consul-ui/app/templates/dc/nspaces/edit.hbs b/ui/packages/consul-nspaces/app/templates/dc/nspaces/edit.hbs similarity index 83% rename from ui/packages/consul-ui/app/templates/dc/nspaces/edit.hbs rename to ui/packages/consul-nspaces/app/templates/dc/nspaces/edit.hbs index d21c293f6..3fef753e8 100644 --- a/ui/packages/consul-ui/app/templates/dc/nspaces/edit.hbs +++ b/ui/packages/consul-nspaces/app/templates/dc/nspaces/edit.hbs @@ -30,13 +30,6 @@ as |route|> loader.data.isNew as |dc partition nspace item create|}} - - -
    1. All Namespaces
    2. @@ -50,7 +43,13 @@ as |dc partition nspace item create|}} - {{ partial 'dc/nspaces/form'}} + {{/let}} diff --git a/ui/packages/consul-ui/app/templates/dc/nspaces/index.hbs b/ui/packages/consul-nspaces/app/templates/dc/nspaces/index.hbs similarity index 82% rename from ui/packages/consul-ui/app/templates/dc/nspaces/index.hbs rename to ui/packages/consul-nspaces/app/templates/dc/nspaces/index.hbs index e5721fbb5..ec008d528 100644 --- a/ui/packages/consul-ui/app/templates/dc/nspaces/index.hbs +++ b/ui/packages/consul-nspaces/app/templates/dc/nspaces/index.hbs @@ -42,13 +42,6 @@ as |route|> as |sort filters items|}} - - -

      @@ -72,6 +65,27 @@ as |route|> {{/if}} + + + + + @@ -118,9 +132,11 @@ as |route|> + + {{/let}} - \ No newline at end of file + diff --git a/ui/packages/consul-nspaces/package.json b/ui/packages/consul-nspaces/package.json new file mode 100644 index 000000000..96ff1ed9b --- /dev/null +++ b/ui/packages/consul-nspaces/package.json @@ -0,0 +1,5 @@ +{ + "name": "consul-nspaces", + "version": "0.1.0", + "private": true +} diff --git a/ui/packages/consul-nspaces/vendor/consul-nspaces/routes.js b/ui/packages/consul-nspaces/vendor/consul-nspaces/routes.js new file mode 100644 index 000000000..3f8a0f904 --- /dev/null +++ b/ui/packages/consul-nspaces/vendor/consul-nspaces/routes.js @@ -0,0 +1,38 @@ +(routes => routes({ + dc: { + nspaces: { + _options: { + path: '/namespaces', + queryParams: { + sortBy: 'sort', + searchproperty: { + as: 'searchproperty', + empty: [['Name', 'Description', 'Role', 'Policy']], + }, + search: { + as: 'filter', + replace: true, + }, + }, + abilities: ['read nspaces'], + }, + edit: { + _options: { path: '/:name' }, + }, + create: { + _options: { + template: 'dc/nspaces/edit', + path: '/create', + abilities: ['create nspaces'], + }, + }, + }, + }, +}))( + (json, data = document.currentScript.dataset) => { + const appNameJS = data.appName.split('-') + .map((item, i) => i ? 
`${item.substr(0, 1).toUpperCase()}${item.substr(1)}` : item) + .join(''); + data[`${appNameJS}Routes`] = JSON.stringify(json); + } +); diff --git a/ui/packages/consul-partitions/app/components/consul/partition/form/index.hbs b/ui/packages/consul-partitions/app/components/consul/partition/form/index.hbs index 7795f03c1..07bdf9281 100644 --- a/ui/packages/consul-partitions/app/components/consul/partition/form/index.hbs +++ b/ui/packages/consul-partitions/app/components/consul/partition/form/index.hbs @@ -12,14 +12,24 @@ ) }} @type={{'partition'}} - @label={{label}} + @label={{'Admin Partition'}} @ondelete={{fn (if @ondelete @ondelete @onsubmit) @item}} @onchange={{fn (optional @onsubmit) @item}} as |writer|> + + + {{#let + + (not (can "write partition")) @item @@ -37,19 +47,18 @@ Description=(array) ) -as |item Name Description|}} - +as |readOnly item Name Description|}}
      +as |State Guard ChartAction dispatch state|>
      - {{#if (is "new partition" item=item)}} +{{#if (is "new partition" item=item)}}
      {{#if (and (is "new partition" item=item) (can "create partitions")) }} - -{{else if (can "write partition" item=item)}} - + +{{else if (not readOnly)}} + Save {{/if}} - + {{#if (and (not (is "new partition" item=item)) (can "delete partition" item=item))}} - + -
      Provide a widget to change the @type
      - - - -
      -
      Provide a widget to change the @status
      - - -
      -
      -
      Show the notification text
      -

      - -

      -
      - + ``` diff --git a/ui/packages/consul-partitions/app/components/consul/partition/notifications/index.hbs b/ui/packages/consul-partitions/app/components/consul/partition/notifications/index.hbs index 14eb0c41a..46e0f483b 100644 --- a/ui/packages/consul-partitions/app/components/consul/partition/notifications/index.hbs +++ b/ui/packages/consul-partitions/app/components/consul/partition/notifications/index.hbs @@ -1,24 +1,16 @@ -{{#if (eq @type 'create')}} - {{#if (eq @status 'success') }} - Your partition has been added. - {{else}} - There was an error adding your partition. - {{/if}} -{{else if (eq @type 'update') }} - {{#if (eq @status 'success') }} - Your partition has been saved. - {{else}} - There was an error saving your partition. - {{/if}} -{{ else if (eq @type 'delete')}} - {{#if (eq @status 'success') }} - Your partition has been marked for deletion. - {{else}} - There was an error deleting your partition. - {{/if}} +{{#if (eq @type 'remove')}} + + + Success! + + +

      + Your Admin Partition has been marked for deletion. +

      +
      +
      {{/if}} -{{#let @error.errors.firstObject as |error|}} - {{#if error.detail }} -
      {{concat '(' (if error.status (concat error.status ': ')) error.detail ')'}} - {{/if}} -{{/let}} diff --git a/ui/packages/consul-partitions/app/components/consul/partition/selector/index.hbs b/ui/packages/consul-partitions/app/components/consul/partition/selector/index.hbs index ec2449e73..f67ed1cac 100644 --- a/ui/packages/consul-partitions/app/components/consul/partition/selector/index.hbs +++ b/ui/packages/consul-partitions/app/components/consul/partition/selector/index.hbs @@ -1,5 +1,8 @@ {{#if (can "use partitions")}} {{#if (can "choose partitions" dc=@dc)}} +{{#let + (or @partition 'default') +as |partition|}}
    3. - {{@partition}} + {{partition}} {{#let components.MenuItem components.MenuSeparator as |MenuItem MenuSeparator|}} @@ -24,10 +27,10 @@ /> {{#each (reject-by 'DeletedAt' @partitions) as |item|}} @@ -55,8 +58,9 @@ class="partition" aria-label="Admin Partition" > - {{@partition}} + {{partition}}
    4. +{{/let}} {{/if}} {{/if}} diff --git a/ui/packages/consul-partitions/app/templates/dc/partitions/edit.hbs b/ui/packages/consul-partitions/app/templates/dc/partitions/edit.hbs index 019ca28bf..0396562f6 100644 --- a/ui/packages/consul-partitions/app/templates/dc/partitions/edit.hbs +++ b/ui/packages/consul-partitions/app/templates/dc/partitions/edit.hbs @@ -27,28 +27,24 @@ as |route|> route.params.nspace loader.data - loader.data.isNew -as |dc partition nspace item create|}} +as |dc partition nspace item|}} - - -
        -
      1. All Partitions
      2. +
      3. All Admin Partitions

      - +

      - - -
      {{/let}} diff --git a/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs b/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs index cf905bfd5..53f96bb0d 100644 --- a/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs +++ b/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs @@ -42,13 +42,6 @@ as |route|> as |sort filters items|}} - - -

      @@ -79,8 +72,17 @@ as |route|> ) }} @type="partition" + @label="Admin Partition" @ondelete={{refresh-route}} as |writer|> + + + - diff --git a/ui/packages/consul-ui/.docfy-config.js b/ui/packages/consul-ui/.docfy-config.js index a3970301c..5a8995323 100644 --- a/ui/packages/consul-ui/.docfy-config.js +++ b/ui/packages/consul-ui/.docfy-config.js @@ -50,6 +50,12 @@ module.exports = { urlSchema: 'auto', urlPrefix: 'docs', }, + { + root: path.resolve(__dirname, 'app/styles'), + pattern: '**/*.mdx', + urlSchema: 'auto', + urlPrefix: 'docs/styles', + }, { root: path.resolve(__dirname, 'app/modifiers'), pattern: '**/*.mdx', @@ -85,6 +91,12 @@ module.exports = { pattern: '**/README.mdx', urlSchema: 'auto', urlPrefix: 'docs/consul-partitions', + }, + { + root: `${path.dirname(require.resolve('consul-nspaces/package.json'))}/app/components`, + pattern: '**/README.mdx', + urlSchema: 'auto', + urlPrefix: 'docs/consul-nspaces', } ].concat(user.sources), labels: { diff --git a/ui/packages/consul-ui/app/abilities/auth-method.js b/ui/packages/consul-ui/app/abilities/auth-method.js index b0af41980..e8b36afc5 100644 --- a/ui/packages/consul-ui/app/abilities/auth-method.js +++ b/ui/packages/consul-ui/app/abilities/auth-method.js @@ -18,4 +18,8 @@ export default class AuthMethodAbility extends BaseAbility { get canDelete() { return this.env.var('CONSUL_ACLS_ENABLED') && super.canDelete; } + + get canUse() { + return this.env.var('CONSUL_SSO_ENABLED'); + } } diff --git a/ui/packages/consul-ui/app/adapters/intention.js b/ui/packages/consul-ui/app/adapters/intention.js index 88285eb25..f81b3645b 100644 --- a/ui/packages/consul-ui/app/adapters/intention.js +++ b/ui/packages/consul-ui/app/adapters/intention.js @@ -6,9 +6,9 @@ import { get } from '@ember/object'; // Listing of intentions still requires the `ns` query string parameter which // will give us all the intentions that have the `ns` as either the SourceNS or // the DestinationNS. 
-// We currently list intentions by the * wildcard namespace for back compat reasons -// TODO: Change the above so that we only list intentions with -// Source/Destination in the currently selected nspace + +// TODO: Investigate the above to see if we should only list intentions with +// Source/Destination in the currently selected nspace or leave as is. export default class IntentionAdapter extends Adapter { requestForQuery(request, { dc, ns, partition, filter, index, uri }) { return request` @@ -21,7 +21,7 @@ export default class IntentionAdapter extends Adapter { } ${{ - partition: '*', + partition, ns: '*', index, filter, diff --git a/ui/packages/consul-ui/app/adapters/permission.js b/ui/packages/consul-ui/app/adapters/permission.js index cb89f0773..33493b375 100644 --- a/ui/packages/consul-ui/app/adapters/permission.js +++ b/ui/packages/consul-ui/app/adapters/permission.js @@ -3,6 +3,7 @@ import { inject as service } from '@ember/service'; export default class PermissionAdapter extends Adapter { @service('env') env; + @service('settings') settings; requestForAuthorize(request, { dc, ns, partition, resources = [], index }) { // the authorize endpoint is slightly different to all others in that it @@ -29,8 +30,30 @@ export default class PermissionAdapter extends Adapter { authorize(store, type, id, snapshot) { return this.rpc( - function(adapter, request, serialized, unserialized) { - return adapter.requestForAuthorize(request, serialized, unserialized); + async (adapter, request, serialized, unserialized) => { + // the authorize endpoint does not automatically take into account the + // default namespace of the token on the backend. This means that we + // need to add the default namespace of the token on the frontend + // instead. Decided this is the best place for it as its almost hidden + // from the rest of the app so from an app eng point of view it almost + // feels like it does happen on the backend. 
+ // Same goes ^ for partitions + const nspacesEnabled = this.env.var('CONSUL_NSPACES_ENABLED'); + const partitionsEnabled = this.env.var('CONSUL_PARTITIONS_ENABLED'); + if(nspacesEnabled || partitionsEnabled) { + const token = await this.settings.findBySlug('token'); + if(nspacesEnabled) { + if(typeof serialized.ns === 'undefined' || serialized.ns.length === 0) { + serialized.ns = token.Namespace; + } + } + if(partitionsEnabled) { + if(typeof serialized.partition === 'undefined' || serialized.partition.length === 0) { + serialized.partition = token.Partition; + } + } + } + return adapter.requestForAuthorize(request, serialized); }, function(serializer, respond, serialized, unserialized) { // Completely skip the serializer here diff --git a/ui/packages/consul-ui/app/components/app-view/README.mdx b/ui/packages/consul-ui/app/components/app-view/README.mdx index b7681597b..da5e92de4 100644 --- a/ui/packages/consul-ui/app/components/app-view/README.mdx +++ b/ui/packages/consul-ui/app/components/app-view/README.mdx @@ -8,9 +8,7 @@ state: needs-love the app chrome), every 'top level main section/template' should have one of these. -It contains legacy authorization code (that can probably be removed now), and -our flash messages (that should be moved to the `` or `` component and potentially be renamed to `Page` or `View` or similar now +This component will potentially be renamed to `Page` or `View` or similar now that we don't need two words. Other than that it provides the basic layout/slots for our main title, search @@ -86,20 +84,12 @@ breadcrumbs and back again. ``` -## Arguments - -| Argument | Type | Default | Description | -| --- | --- | --- | --- | -| `authorized` | `Boolean` | `true` | Whether the View is authorized or not | -| `enabled` | `Boolean` | `true` | Whether ACLs are enabled or not | - ## Slots | Name | Description | | --- | --- | | `header` | The main title of the page, you probably want to put a `

      ` in here | | `content` | The main content of the page, and potentially an `` somewhere | -| `notification` | Old style notifications, also see `` | | `breadcrumbs` | Any breadcrumbs, you probably want an `ol/li/a` in here | | `actions` | Any actions relevant for the entire page, probably using `` | | `nav` | Secondary navigation goes in here, also see `` | diff --git a/ui/packages/consul-ui/app/components/app-view/index.hbs b/ui/packages/consul-ui/app/components/app-view/index.hbs index c23fea539..c40e715eb 100644 --- a/ui/packages/consul-ui/app/components/app-view/index.hbs +++ b/ui/packages/consul-ui/app/components/app-view/index.hbs @@ -4,74 +4,23 @@ > {{yield}}
      - {{#each flashMessages.queue as |flash|}} - - {{#if flash.dom}} - {{{flash.dom}}} - {{else}} - {{#let (lowercase component.flashType) (lowercase flash.action) as |status type|}} - {{! flashes automatically ucfirst the type }} - - - {{/let}} - {{/if}} - - {{/each}}
      - {{#if authorized}} - {{/if}}
      {{yield}}
      - {{#if authorized}} {{yield}} - {{/if}}
      @@ -79,42 +28,12 @@
      - {{#if authorized}} {{yield}} - {{/if}}
      - {{#if (not enabled) }} - - -

      Welcome to ACLs

      -
      - -

      - ACLs are not enabled in this Consul cluster. We strongly encourage the use of ACLs in production environments for the best security practices. -

      -
      - - - - -
      - {{else if (not authorized)}} - - {{else}} - {{yield}} - {{/if}} + {{yield}}

      diff --git a/ui/packages/consul-ui/app/components/app-view/index.js b/ui/packages/consul-ui/app/components/app-view/index.js index ee99672aa..b868ab70e 100644 --- a/ui/packages/consul-ui/app/components/app-view/index.js +++ b/ui/packages/consul-ui/app/components/app-view/index.js @@ -2,6 +2,4 @@ import Component from '@ember/component'; import SlotsMixin from 'block-slots'; export default Component.extend(SlotsMixin, { tagName: '', - authorized: true, - enabled: true, }); diff --git a/ui/packages/consul-ui/app/components/app/index.hbs b/ui/packages/consul-ui/app/components/app/index.hbs index 726f03c61..68498d61f 100644 --- a/ui/packages/consul-ui/app/components/app/index.hbs +++ b/ui/packages/consul-ui/app/components/app/index.hbs @@ -1,5 +1,6 @@ {{#let (hash main=(concat guid '-main') + Notification=(component 'app/notification') ) as |exported|}}
      {{t 'components.app.skip_to_content'}} {{!-- @@ -25,7 +26,7 @@ --}}
      @@ -78,6 +79,13 @@
      +
      + {{yield exported to="notifications"}} + +
      {{yield exported to="main"}}
      * { + min-width: 400px; +} +%app-notifications .app-notification { + @extend %with-transition-500; + transition-property: opacity; + width: fit-content; + max-width: 80%; + pointer-events: auto; +} + [role='banner'] { @extend %main-header-horizontal; } @@ -32,6 +59,7 @@ @extend %main-nav-horizontal-action-active; } %main-nav-sidebar, +%main-notifications, main { @extend %transition-pushover; } @@ -39,34 +67,50 @@ main { transition-property: left; z-index: 10; } +%app-notifications, main { margin-top: var(--chrome-height, 64px); transition-property: margin-left; } +%app-notifications { + transition-property: margin-left, width; +} @media #{$--sidebar-open} { + %main-nav-horizontal-toggle ~ main .notifications { + width: calc(100% - var(--chrome-width)); + } + %main-nav-horizontal-toggle:checked ~ main .notifications { + width: 100%; + } %main-nav-horizontal-toggle + header > div > nav:first-of-type { left: 0; } %main-nav-horizontal-toggle:checked + header > div > nav:first-of-type { left: calc(var(--chrome-width, 300px) * -1); } + %main-nav-horizontal-toggle ~ main .notifications, %main-nav-horizontal-toggle ~ main, %main-nav-horizontal-toggle ~ footer { margin-left: var(--chrome-width, 300px); } + %main-nav-horizontal-toggle:checked ~ main .notifications, %main-nav-horizontal-toggle:checked ~ main, %main-nav-horizontal-toggle:checked ~ footer { margin-left: 0; } } @media #{$--lt-sidebar-open} { + %main-nav-horizontal-toggle ~ main .notifications { + width: 100%; + } %main-nav-horizontal-toggle:checked + header > div > nav:first-of-type { left: 0; } %main-nav-horizontal-toggle + header > div > nav:first-of-type { left: calc(var(--chrome-width, 300px) * -1); } + %main-nav-horizontal-toggle ~ main .notifications, %main-nav-horizontal-toggle ~ main, %main-nav-horizontal-toggle ~ footer { margin-left: 0; diff --git a/ui/packages/consul-ui/app/components/app/notification/index.hbs b/ui/packages/consul-ui/app/components/app/notification/index.hbs new file mode 
100644 index 000000000..e6653d302 --- /dev/null +++ b/ui/packages/consul-ui/app/components/app/notification/index.hbs @@ -0,0 +1,19 @@ +
      + {{yield}} +
      + diff --git a/ui/packages/consul-ui/app/components/auth-dialog/README.mdx b/ui/packages/consul-ui/app/components/auth-dialog/README.mdx index 32d0dbff1..d09f7fbfb 100644 --- a/ui/packages/consul-ui/app/components/auth-dialog/README.mdx +++ b/ui/packages/consul-ui/app/components/auth-dialog/README.mdx @@ -4,7 +4,12 @@ class: ember # AuthDialog ```hbs preview-template - + {{#let components.AuthForm components.AuthProfile as |AuthForm AuthProfile|}} Here's the login form: @@ -27,6 +32,7 @@ A component to help orchestrate a login/logout flow. | --- | --- | --- | --- | | `dc` | `String` | | The name of the current datacenter | | `nspace` | `String` | | The name of the current namespace | +| `partition` | `String` | | The name of the current partition | | `onchange` | `Function` | | An action to fire when the users token has changed (logged in/logged out/token changed) | ### Methods/Actions/api diff --git a/ui/packages/consul-ui/app/components/auth-dialog/index.hbs b/ui/packages/consul-ui/app/components/auth-dialog/index.hbs index afeeba0fd..2e8470208 100644 --- a/ui/packages/consul-ui/app/components/auth-dialog/index.hbs +++ b/ui/packages/consul-ui/app/components/auth-dialog/index.hbs @@ -1,8 +1,20 @@ - + + + + + - - - {{! This DataSource just permanently listens to any changes to the users }} {{! token, whether thats a new token, a changed token or a deleted token }} - {{#yield-slot name="unauthorized"}} + {{#yield-slot name="unauthorized"}} {{yield api components}} {{/yield-slot}} diff --git a/ui/packages/consul-ui/app/components/auth-form/README.mdx b/ui/packages/consul-ui/app/components/auth-form/README.mdx index 37fb97ce8..b8364b360 100644 --- a/ui/packages/consul-ui/app/components/auth-form/README.mdx +++ b/ui/packages/consul-ui/app/components/auth-form/README.mdx @@ -1,19 +1,47 @@ ---- -class: ember ---- # AuthForm +AuthForm is a mostly template only form component specifically for user +authentication for the UI. 
The component uses `TokenSource` which performs the +necessary functionality all composed together using a StateChart to +orchestrate the flow. + +Errors are contained/rendered within the form itself. + ```hbs preview-template - + ``` -### Methods/Actions/api +## Arguments -| Method/Action | Description | +| Argument | Type | Default | Description | +| --- | --- | --- | --- | +| `dc` | `String` | | The name of the current datacenter | +| `nspace` | `String` | | The name of the current namespace | +| `partition` | `String` | | The name of the current partition | +| `onsubmit` | `Function` | | The action to fire when the form is submitted | + +## Exported API + +| Name | Type | Description | +| --- | --- | --- | +| `submit` | `Function` | Submit the form with a `{Name: 'oidc', Value: 'provider-name'}` which will eventually be passed as the `value` to the TokenSource | +| `focus` | `Function` | Focus the input field | +| `error` | `Function` | Fire an error to be displayed in the form | +| `reset` | `Function` | Reset the form back to its original empty/non-error state | +| `disabled` | `Boolean` | Whether the form is currently disabled or not | + +## Slots + +| Name | Description | | --- | --- | -| `reset` | Reset the form back to its original empty/non-error state | +| `content` | Provides a configurable slot underneath the form for addition of other login widgets, in our case SSO | -### See +## See - [Component Source Code](./index.js) - [Template Source Code](./index.hbs) diff --git a/ui/packages/consul-ui/app/components/auth-form/chart.xstate.js b/ui/packages/consul-ui/app/components/auth-form/chart.xstate.js index dc4b28105..dfbb1fd42 100644 --- a/ui/packages/consul-ui/app/components/auth-form/chart.xstate.js +++ b/ui/packages/consul-ui/app/components/auth-form/chart.xstate.js @@ -7,6 +7,11 @@ export default { target: 'idle', }, ], + ERROR: [ + { + target: 'error', + }, + ], }, states: { idle: { @@ -23,15 +28,7 @@ export default { ], }, }, - loading: { - on: { - 
ERROR: [ - { - target: 'error', - }, - ], - }, - }, + loading: {}, error: { exit: ['clearError'], on: { diff --git a/ui/packages/consul-ui/app/components/auth-form/index.hbs b/ui/packages/consul-ui/app/components/auth-form/index.hbs index 45f50569f..4eab2a8b2 100644 --- a/ui/packages/consul-ui/app/components/auth-form/index.hbs +++ b/ui/packages/consul-ui/app/components/auth-form/index.hbs @@ -1,41 +1,93 @@ - - {{yield (hash + +{{#let + (hash + State=State + Guard=Guard + Action=ChartAction + dispatch=dispatch + state=state + ) +as |chart|}} +{{#let + (hash reset=(action dispatch "RESET") - focus=(action 'focus') - )}} - + focus=this.focus + disabled=(state-matches state "loading") + error=(queue + (action dispatch "ERROR") + (action (mut this.error) value="error.errors.firstObject") + ) + submit=(queue + (action (mut this.value)) + (action dispatch "SUBMIT") + ) + ) +as |exported|}} + {{!TODO: Call this reset or similar }} - -
      + +
      + +{{#if (can 'use SSO')}} + +{{/if}} - {{#if error.status}} + {{#if this.error.status}}

      - {{#if value.Name}} - {{#if (eq error.status '403')}} + {{#if this.value.Name}} + {{#if (eq this.error.status '403')}} Consul login failed
      We received a token from your OIDC provider but could not log in to Consul with it. - {{else if (eq error.status '401')}} + {{else if (eq this.error.status '401')}} Could not log in to provider
      The OIDC provider has rejected this access token. Please have an administrator check your auth method configuration. - {{else if (eq error.status '499')}} + {{else if (eq this.error.status '499')}} SSO log in window closed
      The OIDC provider window was closed. Please try again. {{else}} Error
      - {{error.detail}} + {{this.error.detail}} {{/if}} {{else}} - {{#if (eq error.status '403')}} + {{#if (eq this.error.status '403')}} Invalid token
      The token entered does not exist. Please enter a valid token to log in. + {{else if (eq this.error.status '404')}} + No providers
      + No SSO providers are configured for that Admin Partition. {{else}} Error
      - {{error.detail}} + {{this.error.detail}} {{/if}} {{/if}}

      @@ -43,25 +95,34 @@
      {{/if}}
      - + +
      -
      - - Contact your administrator for login credentials. + -{{#if (env 'CONSUL_SSO_ENABLED')}} - {{!-- This `or` can be completely removed post 1.10 as 1.10 has optional params with default values --}} - - {{#if (gt providers.length 0)}} -

      - or -

      - {{/if}} - -{{/if}} +
      + +{{yield (assign exported (hash Method=TabState))}} + + + Contact your administrator for login credentials. + +
      + +
      +{{/let}} +{{/let}} \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/auth-form/index.js b/ui/packages/consul-ui/app/components/auth-form/index.js index 7dda1779d..23c52e3aa 100644 --- a/ui/packages/consul-ui/app/components/auth-form/index.js +++ b/ui/packages/consul-ui/app/components/auth-form/index.js @@ -1,29 +1,23 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -import Ember from 'ember'; +import Component from '@glimmer/component'; +import { action } from '@ember/object'; import chart from './chart.xstate'; +import tabs from './tabs.xstate'; -export default Component.extend({ - tagName: '', - onsubmit: function(e) {}, - onchange: function(e) {}, - // Blink/Webkit based seem to leak password inputs - // this will only occur during acceptance testing so - // turn them into text inputs during acceptance testing - inputType: computed(function() { - return Ember.testing ? 'text' : 'password'; - }), - init: function() { - this._super(...arguments); +export default class AuthForm extends Component { + constructor() { + super(...arguments); this.chart = chart; - }, - actions: { - hasValue: function(context, event, meta) { - return this.value !== '' && typeof this.value !== 'undefined'; - }, - focus: function() { - this.input.focus(); - }, - }, -}); + this.tabsChart = tabs; + } + + @action + hasValue(context, event, meta) { + return this.value !== '' && typeof this.value !== 'undefined'; + } + + @action + focus() { + this.input.focus(); + } +} diff --git a/ui/packages/consul-ui/app/components/auth-form/layout.scss b/ui/packages/consul-ui/app/components/auth-form/layout.scss index 32b057dca..3d9f4a735 100644 --- a/ui/packages/consul-ui/app/components/auth-form/layout.scss +++ b/ui/packages/consul-ui/app/components/auth-form/layout.scss @@ -1,34 +1,23 @@ %auth-form { width: 320px; - margin: 10px 25px; + margin: 0 25px; + margin-top: -20px; +} +%auth-form em { + display: inline-block; + margin-top: 
1em; +} +%auth-form form, +%auth-form .oidc-select { + padding-top: 1em; } %auth-form form { - margin-bottom: 0.5em !important; + margin-bottom: 0 !important; } %auth-form .ember-basic-dropdown-trigger, -%auth-form button { +%auth-form button:not(.reset) { width: 100%; } %auth-form .progress { margin: 0 auto; } -%auth-form > p:not(.error) { - @extend %auth-form-hr; -} -%auth-form-hr { - text-align: center; - position: relative; -} -%auth-form-hr span { - display: inline-block; - padding: 5px; -} - -%auth-form-hr::before { - @extend %as-pseudo; - width: 100%; - position: absolute; - left: 0; - top: 50%; - z-index: -1; -} diff --git a/ui/packages/consul-ui/app/components/auth-form/skin.scss b/ui/packages/consul-ui/app/components/auth-form/skin.scss index 58053db1b..42d6af7f9 100644 --- a/ui/packages/consul-ui/app/components/auth-form/skin.scss +++ b/ui/packages/consul-ui/app/components/auth-form/skin.scss @@ -1,12 +1,5 @@ -%auth-form-hr { - text-transform: uppercase; -} -%auth-form-hr::before { - border-top: 1px solid rgb(var(--tone-gray-200)); -} -/* This is to mask off the hr so it has a space */ -/* in the center so if the background color of what the */ -/* line is on is different, then this should be different */ -%auth-form-hr span { - background-color: rgb(var(--white)); -} +%auth-form em { + @extend %p3; + color: rgb(var(--tone-gray-500)); + font-style: normal; +} \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/auth-form/tabs.xstate.js b/ui/packages/consul-ui/app/components/auth-form/tabs.xstate.js new file mode 100644 index 000000000..99428485d --- /dev/null +++ b/ui/packages/consul-ui/app/components/auth-form/tabs.xstate.js @@ -0,0 +1,20 @@ +export default { + id: 'auth-form-tabs', + initial: 'token', + on: { + TOKEN: [ + { + target: 'token', + }, + ], + SSO: [ + { + target: 'sso', + }, + ], + }, + states: { + token: {}, + sso: {}, + }, +}; diff --git a/ui/packages/consul-ui/app/components/card/skin.scss 
b/ui/packages/consul-ui/app/components/card/skin.scss index da07fa3f1..de730cf5b 100644 --- a/ui/packages/consul-ui/app/components/card/skin.scss +++ b/ui/packages/consul-ui/app/components/card/skin.scss @@ -5,7 +5,7 @@ %card { border: var(--decor-border-100); border-radius: var(--decor-radius-100); - background-color: rgb(var(--white) / 90%); + background-color: rgb(var(--tone-gray-000) / 90%); } %card > section, %card > ul > li { diff --git a/ui/packages/consul-ui/app/components/certificate/index.scss b/ui/packages/consul-ui/app/components/certificate/index.scss index ff343a3a5..f4a7ab23a 100644 --- a/ui/packages/consul-ui/app/components/certificate/index.scss +++ b/ui/packages/consul-ui/app/components/certificate/index.scss @@ -20,7 +20,7 @@ } hr { border: 3px dashed rgb(var(--tone-gray-300)); - background-color: rgb(var(--white)); + background-color: rgb(var(--tone-gray-000)); width: 150px; margin: auto; margin-top: 9px; diff --git a/ui/packages/consul-ui/app/components/code-editor/README.mdx b/ui/packages/consul-ui/app/components/code-editor/README.mdx index 665b77bf7..6e38d2d45 100644 --- a/ui/packages/consul-ui/app/components/code-editor/README.mdx +++ b/ui/packages/consul-ui/app/components/code-editor/README.mdx @@ -6,6 +6,45 @@ state: needs-love # CodeEditor ```hbs preview-template - - + + <:label> + Rules (HCL Format) + + <:content> + {"content": "Initial Content"} + + ``` + +A code-editor with syntax highlighting supporting multiple languages (JSON, HCL, YAML), validation and simple tools such as "Copy to clipboard" + + +### Arguments + +| Argument | Type | Default | Description | +| --- | --- | --- | --- | +| `readonly` | `Boolean` | false | If true, the content (value) of the CodeEditor cannot be changed by the user | +| `name` | `String` | | The name attribute of the form element | +| `syntax` | `String` | | Identifies the language used to validate/syntax highlight the code (possible values: hcl, json, yaml) | +| `oninput` | `Action` | noop | 
Action/callback that is triggered when the user inputs data | + +### Named Blocks + +| Name | Description | Behaviour if empty | +| --- | --- | --- | +| `:label` | Used to define the title that's displayed on the left inside the toolbar above the CodeEditor | Nothing is displayed | +| `:tools` | Used to define tools, buttons, widgets that will be displayed on the right inside the toolbar above the CodeEditor | By default it renders a `language selector` dropdown (if `readonly` == false and `syntax` is falsy) and a `CopyButton` | +| `:content` | Used to display specific content such as code templates inside the CodeEditor | if the block is not defined, @value will be displayed instead | + + +### See + +- [Component Source Code](./index.js) +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/code-editor/index.hbs b/ui/packages/consul-ui/app/components/code-editor/index.hbs index a3d12d9e3..f5594c3d4 100644 --- a/ui/packages/consul-ui/app/components/code-editor/index.hbs +++ b/ui/packages/consul-ui/app/components/code-editor/index.hbs @@ -1,11 +1,37 @@ - -
      {{yield}}
      -{{#if (and (not readonly) (not syntax))}} - - {{mode.name}} - -{{/if}} +
      +
      + +
      + {{#if (has-block "tools")}} + {{yield to="tools"}} + {{else}} + {{#if (and (not readonly) (not syntax))}} + + {{mode.name}} + +
      + + {{/if}} + {{/if}} +
      +
      +
      + +
      {{#if (has-block "content")}}{{yield to="content"}}{{else}}{{value}}{{/if}}
      diff --git a/ui/packages/consul-ui/app/components/code-editor/layout.scss b/ui/packages/consul-ui/app/components/code-editor/layout.scss index 8375d23a5..220004232 100644 --- a/ui/packages/consul-ui/app/components/code-editor/layout.scss +++ b/ui/packages/consul-ui/app/components/code-editor/layout.scss @@ -3,32 +3,66 @@ border: 10px; overflow: hidden; position: relative; + clear: both; } -%code-editor .ember-power-select-trigger { - @extend %code-editor-syntax-select; -} -%code-editor-syntax-select { - width: 200px; - float: right; - z-index: 1; -} -%code-editor-syntax-select { - margin-top: 1px; - border: 0; - background-color: rgb(var(--black)); - color: rgb(var(--white)); - border-left: 1px solid; - border-radius: 0; -} + %code-editor::after { position: absolute; bottom: 0px; width: 100%; height: 25px; - background-color: var(--black); + background-color: rgb(var(--tone-gray-999)); content: ''; display: block; } %code-editor > pre { display: none; } + +%code-editor { + .toolbar-container, + .toolbar-container .toolbar { + align-items: center; + justify-content: space-between; + display: flex; + } + + .toolbar-container { + position: relative; + margin-top: 4px; + height: 44px; + + .toolbar { + flex: 1; + white-space: nowrap; + + .title { + padding: 0 8px; + } + + .toolbar-separator { + height: 32px; + margin: 0 4px; + width: 0; + } + + .tools { + display: flex; + flex-direction: row; + margin: 0 10px; + align-items: center; + .copy-button { + margin-left: 10px; + } + } + } + .ember-basic-dropdown-trigger { + margin: 0 8px; + width: 120px; + height: 32px; + display: flex; + align-items: center; + flex-direction: row; + } + } +} diff --git a/ui/packages/consul-ui/app/components/code-editor/skin.scss b/ui/packages/consul-ui/app/components/code-editor/skin.scss index e52b2bbaf..f4b35e6bd 100644 --- a/ui/packages/consul-ui/app/components/code-editor/skin.scss +++ b/ui/packages/consul-ui/app/components/code-editor/skin.scss @@ -24,7 +24,7 @@ $syntax-dark-gray: 
#535f73; --syntax-yellow: rgb(var(--tone-yellow-500)); } .CodeMirror { - max-width: 1150px; + max-width: 1260px; min-height: 300px; height: auto; /* adds some space at the bottom of the editor for when a horizonal-scroll has appeared */ @@ -46,7 +46,7 @@ $syntax-dark-gray: #535f73; .cm-s-hashi { &.CodeMirror { width: 100%; - background-color: rgb(var(--black)) !important; + background-color: rgb(var(--tone-gray-999)) !important; color: #cfd2d1 !important; border: none; font-family: var(--typo-family-mono); @@ -81,7 +81,7 @@ $syntax-dark-gray: #535f73; .CodeMirror-line::-moz-selection, .CodeMirror-line > span::-moz-selection, .CodeMirror-line > span > span::-moz-selection { - background: rgb(var(--white) / 10%); + background: rgb(var(--tone-gray-000) / 10%); } span.cm-comment { @@ -154,7 +154,7 @@ $syntax-dark-gray: #535f73; .CodeMirror-matchingbracket { text-decoration: underline; - color: rgb(var(--white)) !important; + color: rgb(var(--tone-gray-000)) !important; } } @@ -178,7 +178,7 @@ $syntax-dark-gray: #535f73; } span.cm-property { - color: rgb(var(--white)); + color: rgb(var(--tone-gray-000)); } span.cm-variable-2 { @@ -186,3 +186,35 @@ $syntax-dark-gray: #535f73; } } } + +%code-editor { + .toolbar-container { + background: rgb(var(--tone-gray-050)); + background: linear-gradient( + 180deg, + rgb(var(--tone-gray-050)) 50%, + rgb(var(--tone-gray-150)) 100% + ); + border: 1px solid rgb(var(--tone-gray-150)); + border-bottom-color: rgb(var(--tone-gray-600)); + border-top-color: rgb(var(--tone-gray-400)); + + .toolbar { + .title { + color: rgb(var(--tone-gray-900)); + font-size: 14px; + font-weight: 700; + } + .toolbar-separator { + border-right: 1px solid rgb(var(--tone-gray-300)); + } + } + .ember-power-select-trigger { + background-color: rgb(var(--tone-gray-000)); + color: rgb(var(--tone-gray-999)); + border-radius: var(--decor-radius-100); + border: var(--decor-border-100); + border-color: rgb(var(--tone-gray-700)); + } + } +} diff --git 
a/ui/packages/consul-ui/app/components/composite-row/index.scss b/ui/packages/consul-ui/app/components/composite-row/index.scss index 7b42462c6..2612d0ca1 100644 --- a/ui/packages/consul-ui/app/components/composite-row/index.scss +++ b/ui/packages/consul-ui/app/components/composite-row/index.scss @@ -1,5 +1,4 @@ @import './layout'; -@import './skin'; %composite-row { @extend %list-row; } @@ -33,12 +32,8 @@ .consul-auth-method-list > ul > li:not(:first-child) { @extend %with-composite-row-intent; } -.consul-lock-session-list ul > li:not(:first-child) { - @extend %with-one-action-row; -} // TODO: This hides the iconless dt's in the below lists as they don't have // tooltips the todo would be to wrap these texts in spans -.consul-lock-session-list ul > li:not(:first-child) dl:not([class]) dt, .consul-nspace-list > ul > li:not(:first-child) dt, .consul-token-list > ul > li:not(:first-child) dt, .consul-policy-list > ul li:not(:first-child) dl:not(.datacenter) dt, @@ -113,5 +108,5 @@ %composite-row-header .policy-management dd::before, %composite-row-detail .policy-management::before { @extend %with-star-fill-mask, %as-pseudo; - background-color: rgb(var(--brand-600)); + background-color: rgb(var(--tone-brand-600)); } diff --git a/ui/packages/consul-ui/app/components/composite-row/layout.scss b/ui/packages/consul-ui/app/components/composite-row/layout.scss index fd3b465a5..b855ef108 100644 --- a/ui/packages/consul-ui/app/components/composite-row/layout.scss +++ b/ui/packages/consul-ui/app/components/composite-row/layout.scss @@ -7,11 +7,6 @@ 'header actions' 'detail actions'; } -%with-one-action-row { - @extend %composite-row; - grid-template-columns: 1fr auto; - padding-right: 12px; -} %composite-row-header { grid-area: header; align-self: start; diff --git a/ui/packages/consul-ui/app/components/composite-row/skin.scss b/ui/packages/consul-ui/app/components/composite-row/skin.scss deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/ui/packages/consul-ui/app/components/confirmation-dialog/skin.scss b/ui/packages/consul-ui/app/components/confirmation-dialog/skin.scss index 3c4047582..ded503dcf 100644 --- a/ui/packages/consul-ui/app/components/confirmation-dialog/skin.scss +++ b/ui/packages/consul-ui/app/components/confirmation-dialog/skin.scss @@ -1,5 +1,5 @@ table div.with-confirmation.confirming { - background-color: rgb(var(--white)); + background-color: rgb(var(--tone-gray-000)); } %confirmation-dialog-inline p { color: rgb(var(--tone-gray-400)); diff --git a/ui/packages/consul-ui/app/components/consul/auth-method/index.scss b/ui/packages/consul-ui/app/components/consul/auth-method/index.scss index 1b171b1ca..c1b862381 100644 --- a/ui/packages/consul-ui/app/components/consul/auth-method/index.scss +++ b/ui/packages/consul-ui/app/components/consul/auth-method/index.scss @@ -27,7 +27,7 @@ tbody { td { font-size: var(--typo-size-600); - color: var(--black); + color: rgb(var(--tone-gray-999)); } tr { cursor: default; @@ -74,7 +74,7 @@ tbody { td { font-size: var(--typo-size-600); - color: var(--black); + color: rgb(var(--tone-gray-999)); } tr { cursor: default; diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx b/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx new file mode 100644 index 000000000..939b220e8 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx @@ -0,0 +1,44 @@ +# Consul::Bucket::List + +A presentational component for rendering a list of Consul 'buckets' +(a single partition and/or a single namepace). + +Please note this is not your usual "scrollable list component" more a list of +'buckets' that make up a partition / namespace combination. 
+ +If only the namespace is different to the currently selected namespace, then +the namespace will be displayed, whereas if the partition is different it will +show both the partition and namespace (as a namespace called 'team-1' in +`partition-1` is different to a namespace called 'team-1' in `partition-2`) + +If you don't need the nspace-only support for the view you are building then +omit the `@nspace` argument. + +At the time of writing, this is not currently used across the entire UI +(specifically in intentions and maybe other areas) but eventually should be. + + +```hbs preview-template + + + +``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `item` | `array` | | A Consul object that could have both a `Partition` and a `Namespace` property | +| `nspace` | `string` | | The name of the current namespace | +| `partition` | `string` | | The name of the current partition | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs b/ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs new file mode 100644 index 000000000..d9520749b --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs @@ -0,0 +1,39 @@ +{{#if (and @partition (can 'use partitions'))}} + {{#if (not-eq @item.Partition @partition)}} +
      +
      + Admin Partition +
      +
      + {{@item.Partition}} +
      +
      + Namespace +
      +
      + {{@item.Namespace}} +
      +
      + {{/if}} +{{else if (and @nspace (can 'use nspace'))}} + {{#if (not-eq @item.Namespace @nspace)}} +
      +
      + Namespace +
      +
      + {{@item.Namespace}} +
      +
      + {{/if}} +{{/if}} + diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/index.scss b/ui/packages/consul-ui/app/components/consul/bucket/list/index.scss new file mode 100644 index 000000000..826ef4841 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/index.scss @@ -0,0 +1,26 @@ +%consul-bucket-list { + & { + @extend %horizontal-kv-list; + } + .partition::before { + @extend %with-user-team-mask, %as-pseudo; + } + .nspace::before { + @extend %with-folder-outline-mask, %as-pseudo; + } + /* potential for some sort of %composite-kv thing here */ + .partition + dd::after { + display: inline-block; + content: '/'; + margin: 0 3px; + /*TODO: In isolation this is not needed */ + margin-right: 6px; + } + .partition + dd + .nspace { + margin-left: 0 !important; + } + /**/ +} +.consul-bucket-list { + @extend %consul-bucket-list; +} diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx b/ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx new file mode 100644 index 000000000..0a25754cb --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx @@ -0,0 +1,18 @@ +--- +type: ember +state: needs-love +--- +# Consul::DiscoveryChain + +Mainly presentational component to visualize a discovery-chain. 
+ +```hbs preview-template + +{{#if source.data}} + +{{/if}} + +``` + diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/index.hbs b/ui/packages/consul-ui/app/components/consul/discovery-chain/index.hbs index 52e3ab1a0..fca6c4a23 100644 --- a/ui/packages/consul-ui/app/components/consul/discovery-chain/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/index.hbs @@ -3,7 +3,7 @@ {{selected.nodes}} { opacity: 1 !important; - background-color: var(--white); + background-color: var(--tone-gray-000); border: var(--decor-border-100); border-radius: var(--decor-radius-200); border-color: rgb(var(--tone-gray-500)); diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js b/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js index 31842c7de..8b16e5b5c 100644 --- a/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js @@ -44,8 +44,8 @@ export default Component.extend({ !routes.find(item => typeof item.Definition === 'undefined') ) { let nextNode; - const resolverID = `resolver:${this.chain.ServiceName}.${this.chain.Namespace}.${this.chain.Datacenter}`; - const splitterID = `splitter:${this.chain.ServiceName}.${this.chain.Namespace}`; + const resolverID = `resolver:${this.chain.ServiceName}.${this.chain.Namespace}.${this.chain.Partition}.${this.chain.Datacenter}`; + const splitterID = `splitter:${this.chain.ServiceName}.${this.chain.Namespace}.${this.chain.Partition}`; // The default router should look for a splitter first, // if there isn't one try the default resolver if (typeof this.chain.Nodes[splitterID] !== 'undefined') { @@ -106,6 +106,7 @@ export default Component.extend({ resolvers: computed('chain.{Nodes,Targets}', function() { return getResolvers( this.chain.Datacenter, + this.chain.Partition, this.chain.Namespace, get(this, 'chain.Targets'), get(this, 'chain.Nodes') diff --git 
a/ui/packages/consul-ui/app/components/consul/discovery-chain/skin.scss b/ui/packages/consul-ui/app/components/consul/discovery-chain/skin.scss index 6c34f57b9..30055d3aa 100644 --- a/ui/packages/consul-ui/app/components/consul/discovery-chain/skin.scss +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/skin.scss @@ -63,7 +63,7 @@ } %chain-node-active { opacity: 1; - background-color: rgb(var(--white)); + background-color: rgb(var(--tone-gray-000)); border-color: rgb(var(--tone-gray-500)); } /* TODO: More text truncation, centralize */ @@ -99,7 +99,7 @@ /**/ %with-chain-outlet::before { @extend %as-pseudo; - background-color: rgb(var(--white)); + background-color: rgb(var(--tone-gray-000)); border-radius: var(--decor-radius-full); border: 2px solid rgb(var(--tone-gray-400)); @@ -107,5 +107,5 @@ %discovery-chain circle { stroke-width: 2; stroke: rgb(var(--tone-gray-400)); - fill: rgb(var(--white)); + fill: rgb(var(--tone-gray-000)); } diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js b/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js index 4dd42d1ff..e7698caa9 100644 --- a/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js @@ -1,10 +1,10 @@ const getNodesByType = function(nodes = {}, type) { return Object.values(nodes).filter(item => item.Type === type); }; -const findResolver = function(resolvers, service, nspace = 'default', dc) { +const findResolver = function(resolvers, service, nspace = 'default', partition = 'default', dc) { if (typeof resolvers[service] === 'undefined') { resolvers[service] = { - ID: `${service}.${nspace}.${dc}`, + ID: `${service}.${nspace}.${partition}.${dc}`, Name: service, Children: [], }; @@ -19,7 +19,7 @@ export const getAlternateServices = function(targets, a) { // we might have more data from the endpoint so we don't have to guess // right now the backend also doesn't support 
dots in service names const [aRev, bRev] = [a, b].map(item => item.split('.').reverse()); - const types = ['Datacenter', 'Namespace', 'Service', 'Subset']; + const types = ['Datacenter', 'Partition', 'Namespace', 'Service', 'Subset']; return bRev.find(function(item, i) { const res = item !== aRev[i]; if (res) { @@ -61,7 +61,13 @@ export const getRoutes = function(nodes, uid) { ); }, []); }; -export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes = {}) { +export const getResolvers = function( + dc, + partition = 'default', + nspace = 'default', + targets = {}, + nodes = {} +) { const resolvers = {}; // make all our resolver nodes Object.values(nodes) @@ -70,7 +76,7 @@ export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes const parts = item.Name.split('.'); let subset; // this will leave behind the service.name.nspace.dc even if the service name contains a dot - if (parts.length > 3) { + if (parts.length > 4) { subset = parts.shift(); } parts.reverse(); @@ -79,10 +85,12 @@ export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes parts.shift(); // const nodeNspace = parts.shift(); + // const nodePartition = + parts.shift(); // if it does contain a dot put it back to the correct order parts.reverse(); const service = parts.join('.'); - const resolver = findResolver(resolvers, service, nspace, dc); + const resolver = findResolver(resolvers, service, nspace, partition, dc); let failovers; if (typeof item.Resolver.Failover !== 'undefined') { // figure out what type of failover this is @@ -108,12 +116,12 @@ export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes // Failovers don't have a specific node if (typeof nodes[`resolver:${target.ID}`] !== 'undefined') { // We use this to figure out whether this target is a redirect target - const alternate = getAlternateServices([target.ID], `service.${nspace}.${dc}`); + const alternate = getAlternateServices([target.ID], 
`service.${nspace}.${partition}.${dc}`); // as Failovers don't make it here, we know anything that has alternateServices // must be a redirect if (alternate.Type !== 'Service') { // find the already created resolver - const resolver = findResolver(resolvers, target.Service, nspace, dc); + const resolver = findResolver(resolvers, target.Service, nspace, partition, dc); // and add the redirect as a child, redirects are always children const child = { Redirect: true, diff --git a/ui/packages/consul-ui/app/components/consul/external-source/index.hbs b/ui/packages/consul-ui/app/components/consul/external-source/index.hbs index 38f446386..a2ee22a00 100644 --- a/ui/packages/consul-ui/app/components/consul/external-source/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/external-source/index.hbs @@ -1,21 +1,46 @@ {{#if @item}} {{#let (service/external-source @item) as |externalSource|}} - {{#if externalSource}} - - {{#if @label}} - {{@label}} - {{else}} - {{#if (eq externalSource 'aws')}} - Registered via {{uppercase externalSource}} - {{else}} - Registered via {{capitalize externalSource}} - {{/if}} - {{/if}} - - {{/if}} + {{#if (and @withInfo (eq externalSource 'consul-api-gateway'))}} +
      +
      + + Registered via {{t (concat "common.brand." externalSource)}} + +
      +
      + + + API Gateways manage north-south traffic from external services to services in the Datacenter. For more information, read our documentation. + + +
    5. + About {{t (concat "common.brand." externalSource)}} +
    6. + +
      +
      +
      +
      + {{else if externalSource}} + + {{#if @label}} + {{@label}} + {{else}} + Registered via {{t (concat "common.brand." externalSource)}} + {{/if}} + + {{/if}} {{/let}} {{/if}} diff --git a/ui/packages/consul-ui/app/components/consul/intention/form/index.hbs b/ui/packages/consul-ui/app/components/consul/intention/form/index.hbs index 98dc57c23..76281fd9c 100644 --- a/ui/packages/consul-ui/app/components/consul/intention/form/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/intention/form/index.hbs @@ -14,21 +14,45 @@ @onsubmit={{action this.onsubmit}} as |api|> - - -

      + An intention already exists for this Source-Destination pair. Please enter a different combination of Services, or search the intentions to edit an existing intention. +

      + + +{{else}} + + Error! - There was an error saving your intention. - {{#if (and api.error.status api.error.detail)}} -
      {{api.error.status}}: {{api.error.detail}} - {{/if}} +
      + +

      + There was an error saving your intention. + {{#if (and api.error.status api.error.detail)}} +
      {{api.error.status}}: {{api.error.detail}} + {{/if}} +

      +
      +
      {{/if}} -

      -
      diff --git a/ui/packages/consul-ui/app/components/consul/intention/list/README.mdx b/ui/packages/consul-ui/app/components/consul/intention/list/README.mdx new file mode 100644 index 000000000..67f5c0695 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/intention/list/README.mdx @@ -0,0 +1,36 @@ +# Consul::Intention::List + +A component for rendering Intentions. + + +There are some extra conextual components to use here due to how we detect +intention CRDs and make that easy to work with/add necessary notices. The +notice will only show if applicable, but the contextual component is used to +define where that is when it does display. + +```hbs preview-template + + + + + + + + +``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `items` | `array` | | An array of Intentions | +| `ondelete` | `function` | | An action to execute when the `Delete` action is clicked | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs index 25044226e..beeb91cd6 100644 --- a/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/intention/list/table/index.hbs @@ -24,6 +24,7 @@ as |item index|> {{else}} {{item.SourceName}} {{/if}} + {{#if (or (can 'use nspaces') (can 'use partitions'))}} {{! TODO: slugify }} class={{concat 'nspace-' (or item.SourceNS 'default')}} >{{or item.SourceNS 'default'}} + {{/if}} @@ -44,6 +46,7 @@ as |item index|> {{else}} {{item.DestinationName}} {{/if}} + {{#if (or (can 'use nspaces') (can 'use partitions'))}} {{! 
TODO: slugify }} class={{concat 'nspace-' (or item.DestinationNS 'default')}} >{{or item.DestinationNS 'default'}} + {{/if}} @@ -65,7 +69,7 @@ as |item index|> {{/if}} -{{#if (or (can "write intention" item=item) (can "view CRD intention" item=item))}} +{{#if (and (or (can "write intention" item=item) (can "view CRD intention" item=item)) (not-eq item.Meta.external-source 'consul-api-gateway'))}} Code
      + -
      -{{#if (env 'CONSUL_ACLS_ENABLED')}} -
      -

      Roles

      -

      - {{#if (can "write nspace" item=item)}} - By adding roles to this namespaces, you will apply them to all tokens created within this namespace. - {{else}} - The following roles are applied to all tokens created within this namespace. - {{/if}} -

      - -
      -
      -

      Policies

      -

      - {{#if (can "write nspace" item=item)}} - By adding policies to this namespaces, you will apply them to all tokens created within this namespace. - {{else}} - The following policies are applied to all tokens created within this namespace. - {{/if}} -

      - -
      -{{/if}} -
      -{{#if (and create (can "create nspaces")) }} - -{{else}} - {{#if (can "write nspace" item=item)}} - - {{/if}} -{{/if}} - -{{# if (and (not create) (can "delete nspace" item=item) ) }} - - - - - - - - -{{/if}} -
      - - diff --git a/ui/packages/consul-ui/app/templates/dc/services/index.hbs b/ui/packages/consul-ui/app/templates/dc/services/index.hbs index b35b76fe0..27342bf3c 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/index.hbs @@ -53,7 +53,10 @@ as |route|> (reject-by 'Kind' 'connect-proxy' api.data) -as |sort filters items|}} + (or route.params.partition route.model.user.token.Partition 'default') + (or route.params.nspace route.model.user.token.Namespace 'default') + +as |sort filters items partition nspace|}} @@ -63,9 +66,12 @@ as |sort filters items|}} - {{#if (gt items.length 0) }} +{{#if (gt items.length 0) }} + {{#let (collection items) as |items|}} - {{/if}} + {{/let}} +{{/if}} @@ -115,10 +123,20 @@ as |sort filters items|}} diff --git a/ui/packages/consul-ui/app/templates/dc/services/instance.hbs b/ui/packages/consul-ui/app/templates/dc/services/instance.hbs index bfa302abc..cab131278 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/instance.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/instance.hbs @@ -21,28 +21,58 @@ as |route|> />
      - + {{#if (eq loader.error.status "404")}} - - - + + +

      + This service has been deregistered and no longer exists in the catalog. +

      +
      + {{else if (eq loader.error.status "403")}} - - - + + +

      + You no longer have access to this service +

      +
      + {{else}} - - - + + +

      + An error was returned whilst loading this data, refresh to try again. +

      +
      + {{/if}}
      @@ -97,7 +127,7 @@ as |item|}}

      - + {{#if (eq proxy.ServiceProxy.Mode 'transparent')}} diff --git a/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs b/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs index d67a0cd25..526073d51 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs @@ -20,12 +20,14 @@ as |route|> ) ) + (or route.params.partition route.model.user.token.Partition 'default') + (or route.params.nspace route.model.user.token.Namespace 'default') route.params.dc - route.params.nspace + route.model.proxy route.model.proxy.Service.Proxy.Upstreams - as |sort filters dc nspace proxy items|}} + as |sort filters partition nspace dc proxy items|}} {{#if (gt items.length 0)}} @items={{collection.items}} @dc={{dc}} @nspace={{nspace}} + @partition={{partition}} /> diff --git a/ui/packages/consul-ui/app/templates/dc/services/show.hbs b/ui/packages/consul-ui/app/templates/dc/services/show.hbs index a406a8573..4817b0984 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show.hbs @@ -19,28 +19,58 @@ as |route|> />
      - + {{#if (eq loader.error.status "404")}} - - - + + +

      + This service has been deregistered and no longer exists in the catalog. +

      +
      + {{else if (eq loader.error.status "403")}} - - - + + +

      + You no longer have access to this service +

      +
      + {{else}} - - - + + +

      + An error was returned whilst loading this data, refresh to try again. +

      +
      + {{/if}}
      @@ -112,7 +142,7 @@ as |items item dc|}}

      - +
      @@ -156,10 +186,17 @@ as |items item dc|}} }} as |config|> {{#if config.data.dashboard_url_templates.service}} - as |collection|> diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/topology.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/topology.hbs index 96ddbc647..a8df87e41 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/topology.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/topology.hbs @@ -103,10 +103,16 @@ as |nspace dc items topology|}} @service={{items.firstObject}} @topology={{topology}} - @metricsHref={{render-template config.data.dashboard_url_templates.service (hash - Datacenter=dc.Name - Service=items.firstObject - )}} + @metricsHref={{render-template config.data.dashboard_url_templates.service + (hash + Datacenter=dc.Name + Service=(hash + Name=items.firstObject.Name + Namespace=(or items.firstObject.Namespace '') + Partition=(or items.firstObject.Partition '') + ) + ) + }} @isRemoteDC={{not dc.Local}} @hasMetricsProvider={{gt config.data.metrics_provider.length 0}} @oncreate={{route-action 'createIntention'}} diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs index 116e00bb3..72d86d0f7 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs @@ -42,11 +42,12 @@ as |route|> ) ) - route.params.nspace + (or route.params.partition route.model.user.token.Partition 'default') + (or route.params.nspace route.model.user.token.Namespace 'default') route.params.dc loader.data - as |sort filters nspace dc items|}} + as |sort filters partition nspace dc items|}} {{#if (gt items.length 0)}} @items={{collection.items}} @dc={{dc}} @nspace={{nspace}} + @partition={{partition}} > diff --git a/ui/packages/consul-ui/app/templates/debug.hbs b/ui/packages/consul-ui/app/templates/debug.hbs index 89bb4d3ee..b767b5197 
100644 --- a/ui/packages/consul-ui/app/templates/debug.hbs +++ b/ui/packages/consul-ui/app/templates/debug.hbs @@ -1,11 +1,37 @@ {{page-title 'Engineering Docs - Consul' separator=' - '}} {{document-attrs class="is-debug"}} +{{! Tell CSS what we have enabled }} +{{#if (can "use acls")}} + {{document-attrs class="has-acls"}} +{{/if}} +{{#if (can "use nspaces")}} + {{document-attrs class="has-nspaces"}} +{{/if}} +{{#if (can "use partitions")}} + {{document-attrs class="has-partitions"}} +{{/if}} + + <:notifications as |app|> + {{#each flashMessages.queue as |flash|}} + {{#if flash.dom}} + + {{{flash.dom}}} + + {{/if}} + {{/each}} + + <:main-nav>
        + +{{!hardcode in docs first}}
      • Docs
      • {{#each node.children as |child|}} {{#each child.pages as |child|}} @@ -17,7 +43,27 @@ {{/each}} + +{{!hardcode in styles next}} + {{#let (find-by 'label' 'styles' child.children) as |section|}} +
      • {{section.label}}
      • +{{#each (flatten-property section 'pages') as |child|}} +
      • + + {{classify child.title}} + +
      • +{{/each}} + {{/let}} + +{{!print out the rest}} {{#each child.children as |section|}} +{{#if (not-eq section.label 'styles')}}
      • {{section.label}}
      • {{#each (flatten-property section 'pages') as |child|}}
      • {{/each}} +{{/if}} {{/each}} {{/each}} +
      diff --git a/ui/packages/consul-ui/app/templates/head.hbs b/ui/packages/consul-ui/app/templates/head.hbs deleted file mode 100644 index d2eda4f16..000000000 --- a/ui/packages/consul-ui/app/templates/head.hbs +++ /dev/null @@ -1 +0,0 @@ -{{model.title}} diff --git a/ui/packages/consul-ui/app/templates/nspace.hbs b/ui/packages/consul-ui/app/templates/nspace.hbs deleted file mode 100644 index a02351727..000000000 --- a/ui/packages/consul-ui/app/templates/nspace.hbs +++ /dev/null @@ -1,10 +0,0 @@ - - - {{outlet}} - - diff --git a/ui/packages/consul-ui/app/utils/create-fingerprinter.js b/ui/packages/consul-ui/app/utils/create-fingerprinter.js index 263b349f2..d0f103dcf 100644 --- a/ui/packages/consul-ui/app/utils/create-fingerprinter.js +++ b/ui/packages/consul-ui/app/utils/create-fingerprinter.js @@ -5,14 +5,19 @@ export default function(foreignKey, nspaceKey, partitionKey, hash = JSON.stringi return function(item) { foreignKeyValue = foreignKeyValue == null ? item[foreignKey] : foreignKeyValue; if (foreignKeyValue == null) { - throw new Error('Unable to create fingerprint, missing foreignKey value'); + throw new Error( + `Unable to create fingerprint, missing foreignKey value. Looking for value in \`${foreignKey}\` got \`${foreignKeyValue}\`` + ); } const slugKeys = slugKey.split(','); const slugValues = slugKeys.map(function(slugKey) { - if (get(item, slugKey) == null || get(item, slugKey).length < 1) { - throw new Error('Unable to create fingerprint, missing slug'); + const slug = get(item, slugKey); + if (slug == null || slug.length < 1) { + throw new Error( + `Unable to create fingerprint, missing slug. Looking for value in \`${slugKey}\` got \`${slug}\`` + ); } - return get(item, slugKey); + return slug; }); // This ensures that all data objects have a Namespace and a Partition // value set, even in OSS. 
diff --git a/ui/packages/consul-ui/app/utils/dom/event-source/callable.js b/ui/packages/consul-ui/app/utils/dom/event-source/callable.js index f8b232586..86e8af079 100644 --- a/ui/packages/consul-ui/app/utils/dom/event-source/callable.js +++ b/ui/packages/consul-ui/app/utils/dom/event-source/callable.js @@ -57,7 +57,7 @@ export default function( // close after the dispatch so we can tell if it was an error whilst closed or not // but make sure its before the promise tick this.readyState = 2; // CLOSE - this.dispatchEvent({ type: 'close' }); + this.dispatchEvent({ type: 'close', error: e }); }) .then(() => { // This only gets called when the promise chain completely finishes diff --git a/ui/packages/consul-ui/app/utils/form/builder.js b/ui/packages/consul-ui/app/utils/form/builder.js index b0091ee56..8d3276891 100644 --- a/ui/packages/consul-ui/app/utils/form/builder.js +++ b/ui/packages/consul-ui/app/utils/form/builder.js @@ -6,7 +6,7 @@ import lookupValidator from 'ember-changeset-validations'; // Keep these here for now so forms are easy to make // TODO: Probably move this to utils/form/parse-element-name import parseElementName from 'consul-ui/utils/get-form-name-property'; -const defaultChangeset = function(data, validators) { +export const defaultChangeset = function(data, validators) { return createChangeset(data, lookupValidator(validators), validators, { changeset: Changeset }); }; /** diff --git a/ui/packages/consul-ui/app/validations/nspace.js b/ui/packages/consul-ui/app/validations/nspace.js deleted file mode 100644 index 56a6ff554..000000000 --- a/ui/packages/consul-ui/app/validations/nspace.js +++ /dev/null @@ -1,4 +0,0 @@ -import { validateFormat } from 'ember-changeset-validations/validators'; -export default { - Name: validateFormat({ regex: /^[a-zA-Z0-9]([a-zA-Z0-9-]{0,62}[a-zA-Z0-9])?$/ }), -}; diff --git a/ui/packages/consul-ui/config/targets.js b/ui/packages/consul-ui/config/targets.js index cbdce5c0d..1b7774808 100644 --- 
a/ui/packages/consul-ui/config/targets.js +++ b/ui/packages/consul-ui/config/targets.js @@ -1,13 +1,29 @@ 'use strict'; -// async/await support came with the below specified versions for Chrome, + +// Technically this file configures babel transpilation support but we also +// use this file as a reference for our current browser support matrix and is +// therefore used by humans also. Therefore please feel free to be liberal +// with comments. + +// We are moving to a rough ~2 years back support rather than a 2 versions +// back support. This strikes a balance between folks who need to get a job +// done in the Consul UI and keeping the codebase modern and being able to use +// modern Web Platform features. This is not set in stone but please consult +// with the rest of the team before bumping forwards (or backwards) +// We pin specific versions rather than use a relative value so we can choose +// to bump and it's clear what is supported. + +/// + +// async/await support came before the below specified versions for Chrome, // Firefox and Edge. Async/await is is the newest ES6 feature we are not // transpiling. Safari's template literal support is a little problematic during // v12 in that it has a GC bug for tagged template literals. We don't currently // rely on this functionality so the bug wouldn't effect us, but in order to use // browser versions as a measure for ES6 features we need to specify Safari 13 // for native, non-transpiled template literals. In reality template literals -// came in Safari 9.1. Safari's async/await support came in Safari 10, so thats -// the earliest Safari we cover in reality here. +// came in Safari 9.1. Safari's async/await support came in Safari 10. 
+ module.exports = { - browsers: ['Chrome 55', 'Firefox 53', 'Safari 13', 'Edge 15'], + browsers: ['Chrome 79', 'Firefox 72', 'Safari 13', 'Edge 79'], }; diff --git a/ui/packages/consul-ui/docs/index.mdx b/ui/packages/consul-ui/docs/index.mdx index 5a77a6dd5..3c1da2ea8 100644 --- a/ui/packages/consul-ui/docs/index.mdx +++ b/ui/packages/consul-ui/docs/index.mdx @@ -32,8 +32,9 @@ The above will render the same code snippet in a box above the snippet. The location and name of markdown files within the project differs slightly depending on what you need to add documentation for: - **docs**: `docs/filename.mdx` -- **components**: `components/your-component-name/README.mdx` -- **helpers**: `helpers/your-helper-name.mdx` -- **modifiers**: `modifiers/your-modifier-name.mdx` -- **services**: `services/your-service-name.mdx` (eventually these will partly use jsdoc code style generation) +- **styles**: `app/styles/*/*/README.mdx` (currently mostly in `app/styles/base`) +- **components**: `app/components/your-component-name/README.mdx` +- **helpers**: `app/helpers/your-helper-name.mdx` +- **modifiers**: `app/modifiers/your-modifier-name.mdx` +- **services**: `app/services/your-service-name.mdx` (eventually these will partly use jsdoc code style generation) diff --git a/ui/packages/consul-ui/docs/testing.mdx b/ui/packages/consul-ui/docs/testing.mdx index b13f19aff..1bc473442 100644 --- a/ui/packages/consul-ui/docs/testing.mdx +++ b/ui/packages/consul-ui/docs/testing.mdx @@ -24,6 +24,20 @@ filter field in QUnit's HTML test runner to run a subset of tests (see below). If you have started the tests with a filter, you don't have to quit everything and start again in order to filter by something different. +## Browser support + +Please check our current browser support when adding features or fixing bugs. + +At the time of writing we use a rough ~2 years back support matrix for our +browser support. 
As most of our targetted browsers are 'evergreen' and +auto-update this feels like it strikes a balance between the folks that are +likely to be using the Consul UI and the desire to maintain a modern +codebase/feature set. This is a balance that should always be revisited and is +not set in stone. i.e. if you need to use something check-in with the team +first, it might be the time to bump again. + +Please see [../config/targets.js](../config/targets.js) for our current support. + ### Running a single or a range of tests Please use the same `make test-oss-view` command to get the tests up and diff --git a/ui/packages/consul-ui/ember-cli-build.js b/ui/packages/consul-ui/ember-cli-build.js index 4e694d346..4bff448c2 100644 --- a/ui/packages/consul-ui/ember-cli-build.js +++ b/ui/packages/consul-ui/ember-cli-build.js @@ -28,7 +28,8 @@ module.exports = function(defaults, $ = process.env) { const apps = [ 'consul-acls', - 'consul-partitions' + 'consul-partitions', + 'consul-nspaces' ].map(item => { return { name: item, @@ -59,6 +60,8 @@ module.exports = function(defaults, $ = process.env) { excludeFiles = excludeFiles.concat([ 'instance-initializers/debug.js', 'routing/**/*-debug.js', + 'helpers/**/*-debug.js', + 'modifiers/**/*-debug.js', 'services/**/*-debug.js', 'templates/debug.hbs', 'components/debug/**/*.*' diff --git a/ui/packages/consul-ui/lib/startup/index.js b/ui/packages/consul-ui/lib/startup/index.js index 41dcd8990..5a2f6b534 100644 --- a/ui/packages/consul-ui/lib/startup/index.js +++ b/ui/packages/consul-ui/lib/startup/index.js @@ -54,6 +54,11 @@ module.exports = { environment: config.environment, rootURL: config.environment === 'production' ? 
'{{.ContentPath}}' : config.rootURL, config: config, + env: function(key) { + if (process.env[key]) { + return process.env[key]; + } + }, }; switch (type) { case 'head': diff --git a/ui/packages/consul-ui/lib/startup/templates/body.html.js b/ui/packages/consul-ui/lib/startup/templates/body.html.js index f5d164c50..cb9016821 100644 --- a/ui/packages/consul-ui/lib/startup/templates/body.html.js +++ b/ui/packages/consul-ui/lib/startup/templates/body.html.js @@ -15,7 +15,7 @@ const hbs = (path, attrs = {}) => const BrandLoader = attrs => hbs('brand-loader/index.hbs', attrs); const Enterprise = attrs => hbs('brand-loader/enterprise.hbs', attrs); -module.exports = ({ appName, environment, rootURL, config }) => ` +module.exports = ({ appName, environment, rootURL, config, env }) => `