updated CTS compatibility page to account for Vault and all supported product versions

commit 00223a5462
Jake Herschman 2022-01-21 16:36:08 -08:00
684 changed files with 22392 additions and 10642 deletions

.changelog/10894.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
api: Improve error message if service or health check not found by stating that the entity must be referred to by ID, not name
```
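For illustration, a hedged sketch with the Go `api` client of the call this message targets, assuming an instance was registered with ID `web-1` but is deregistered by its name `web`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// "web" is the service *name*; the instance was registered with ID
	// "web-1", so the agent now answers with the clearer hint that the
	// entity must be referred to by ID, not name.
	if err := client.Agent().ServiceDeregister("web"); err != nil {
		fmt.Println(err)
	}
}
```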

.changelog/11335.txt (new file)

@ -0,0 +1,3 @@
```release-note:enhancement
api: URL-encode/decode resource names for v1/agent endpoints in API
```
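As a quick illustration (the ID below is hypothetical), a caller hitting the raw HTTP API percent-encodes such a name and relies on the agent decoding it back:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A service ID containing characters that are not safe in a URL
	// path segment; the v1/agent endpoints now decode the escaped form.
	id := "web/instance 1"
	fmt.Println("/v1/agent/service/" + url.PathEscape(id))
	// Prints: /v1/agent/service/web%2Finstance%201
}
```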

.changelog/11576.txt (new file)

@ -0,0 +1,3 @@
```release-note:feature
ingress: allow setting TLS min version and cipher suites in ingress gateway config entries
```
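A minimal sketch of setting this through the Go `api` client; the `TLSMinVersion` and `CipherSuites` field names and values are assumptions about the config-entry shape, not taken from this diff:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entry := &api.IngressGatewayConfigEntry{
		Kind: api.IngressGateway,
		Name: "ingress-gateway",
		TLS: api.GatewayTLSConfig{
			Enabled:       true,
			TLSMinVersion: "TLSv1_2",
			CipherSuites:  []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"},
		},
	}
	// Write the config entry; the gateway picks up the new TLS settings.
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```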

.changelog/11781.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
cli: when creating a private key, save the file with mode 0600 so that only the user has read permission.
```
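The gist of the fix, as a standalone sketch (path and key material are placeholders):

```go
package main

import (
	"log"
	"os"
)

func main() {
	keyPEM := []byte("-----BEGIN EC PRIVATE KEY-----\n...\n-----END EC PRIVATE KEY-----\n")
	// 0600: readable and writable by the owning user only, so other
	// local users cannot read the freshly generated private key.
	if err := os.WriteFile("consul-agent-ca-key.pem", keyPEM, 0o600); err != nil {
		log.Fatal(err)
	}
}
```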

.changelog/11820.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
http: when a user attempts to access the UI but can't because it's disabled, explain this and how to fix it
```

.changelog/11827.txt (new file)

@ -0,0 +1,3 @@
```release-note:breaking-change
sdk: several changes to the testutil configuration structs (removed `ACLMasterToken`, renamed `Master` to `InitialManagement`, and `AgentMaster` to `AgentRecovery`)
```

.changelog/11895.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
connect: update the supported Envoy version 1.20.0 to 1.20.1
```

.changelog/11903.txt (new file)

@ -0,0 +1,4 @@
```release-note:bug
ui: Fixes a bug where proxy service health checks would sometimes not appear
until refresh
```

.changelog/11905.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
ui: Prevent disconnection notice appearing with auth change on certain pages
```

.changelog/11918.txt (new file)

@ -0,0 +1,6 @@
```release-note:bug
config: include all config errors in the error message, previously some could be hidden.
```
```release-note:bug
snapshot: the `snapshot save` command now saves the snapshot with read permission for only the current user.
```

.changelog/11924.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
xds: fix a deadlock when the snapshot channel already has a snapshot to be consumed.
```

.changelog/11926.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
cli: Display assigned node identities in output of `consul acl token list`.
```

.changelog/11931.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
ui: Fixes a bug with URL decoding within KV area
```

.changelog/11937.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
ui: Fixes an issue saving intentions when editing per service intentions
```

.changelog/11940.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
Mutate `NodeService` struct properly to avoid a data race.
```

.changelog/11950.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
api: Return 404 when de-registering a non-existent check
```
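A hedged example of what changes for callers of the Go `api` client (the check ID is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// With this change, de-registering a check that does not exist
	// surfaces a 404 error from the agent endpoint.
	if err := client.Agent().CheckDeregister("no-such-check"); err != nil {
		fmt.Println(err)
	}
}
```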

.changelog/11958.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
Upgrade to raft `1.3.3` which fixes a bug where a read replica node can trigger a raft election and become a leader.
```

.changelog/11959.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
ui: Fixes a visual issue with some border colors
```

.changelog/11960.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
windows: Fixes a bug with empty log files when Consul is run as a Windows Service
```

.changelog/11968.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
ui: Temporarily remove KV pre-flight check for KV list permissions
```

.changelog/11979.txt (new file)

@ -0,0 +1,4 @@
```release-note:bug
ui: Ensure partition query parameter is passed through to all OIDC related API
requests
```

.changelog/11985.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
ui: Added a notice for non-primary intention creation
```

.changelog/12042.txt (new file)

@ -0,0 +1,4 @@
```release-note:bug
memberlist: fixes a bug which prevented members from joining a cluster with
large amounts of churn [[GH-253](https://github.com/hashicorp/memberlist/issues/253)]
```

.changelog/12049.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
connect: Add support for connecting to services behind a terminating gateway when using a transparent proxy.
```

.changelog/12057.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
serf: update to serf v0.9.7, which completes the leave process if broadcasting the leave times out.
```

.changelog/12081.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
ui: Fixed a bug with creating multiple nested KVs in one interaction
```

.changelog/12126.txt (new file)

@ -0,0 +1,3 @@
```release-note:improvement
sdk: Add support for `Partition` and `RetryJoin` to the TestServerConfig struct.
```
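A hedged sketch of using the new fields from a test, assuming the usual `NewTestServerConfigT` callback pattern (values are placeholders):

```go
package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
)

func runTestServer(t *testing.T) {
	srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.Partition = "team1"                    // new in this change
		c.RetryJoin = []string{"127.0.0.1:8301"} // new in this change
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = srv.Stop() })
}
```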

.changelog/_1502.txt (new file)

@ -0,0 +1,3 @@
```release-note:bug
partitions: **(Enterprise only)** Do not leave a serf partition when the partition is deleted
```


@ -689,11 +689,15 @@ jobs:
if ! git diff --quiet --exit-code HEAD^! ui/; then
git config --local user.email "github-team-consul-core@hashicorp.com"
git config --local user.name "hc-github-team-consul-core"
# checkout the CI branch and merge latest from main
git checkout ci/main-assetfs-build
git merge --no-edit main
short_sha=$(git rev-parse --short HEAD)
git add agent/uiserver/bindata_assetfs.go
git commit -m "auto-updated agent/uiserver/bindata_assetfs.go from commit ${short_sha}"
git push origin main
git push origin ci/main-assetfs-build
else
echo "no UI changes so no static assets to publish"
fi
@ -837,10 +841,10 @@ jobs:
environment:
ENVOY_VERSION: "1.19.1"
envoy-integration-test-1_20_0:
envoy-integration-test-1_20_1:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.20.0"
ENVOY_VERSION: "1.20.1"
# run integration tests for the connect ca providers
test-connect-ca-providers:
@ -1091,7 +1095,7 @@ workflows:
- envoy-integration-test-1_19_1:
requires:
- dev-build
- envoy-integration-test-1_20_0:
- envoy-integration-test-1_20_1:
requires:
- dev-build


@ -1,7 +1,7 @@
version: 2
updates:
- package-ecosystem: gomod
open-pull-requests-limit: 5
open-pull-requests-limit: 10
directory: "/"
labels:
- "go"
@ -9,33 +9,6 @@ updates:
- "pr/no-changelog"
schedule:
interval: daily
- package-ecosystem: gomod
open-pull-requests-limit: 5
directory: "/api"
labels:
- "go"
- "dependencies"
- "pr/no-changelog"
schedule:
interval: daily
- package-ecosystem: gomod
open-pull-requests-limit: 5
directory: "/sdk"
labels:
- "go"
- "dependencies"
- "pr/no-changelog"
schedule:
interval: daily
- package-ecosystem: npm
open-pull-requests-limit: 5
directory: "/ui"
labels:
- "javascript"
- "dependencies"
- "pr/no-changelog"
schedule:
interval: daily
- package-ecosystem: npm
open-pull-requests-limit: 5
directory: "/website"


@ -249,4 +249,4 @@ jobs:
arch: ${{matrix.arch}}
tags: |
docker.io/hashicorp/${{env.repo}}:${{env.version}}
ecr.public.aws/hashicorp/${{env.repo}}:${{env.version}}
public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}


@ -21,8 +21,10 @@ on:
jobs:
# checks that a 'type/docs-cherrypick' label is attached to PRs with website/ changes
website-check:
# If there's a `type/docs-cherrypick` label we ignore this check
if: "!contains(github.event.pull_request.labels.*.name, 'type/docs-cherrypick')"
# If there's already a `type/docs-cherrypick` label or an explicit `pr/no-docs` label, we ignore this check
if: >-
!contains(github.event.pull_request.labels.*.name, 'type/docs-cherrypick') ||
!contains(github.event.pull_request.labels.*.name, 'pr/no-docs')
runs-on: ubuntu-latest
steps:
@ -40,7 +42,7 @@ jobs:
# post PR comment to GitHub to check if a 'type/docs-cherrypick' label needs to be applied to the PR
echo "website-check: Did not find a 'type/docs-cherrypick' label, posting a reminder in the PR"
github_message="🤔 This PR has changes in the \`website/\` directory but does not have a \`type/docs-cherrypick\` label. If the changes are for the next version, this can be ignored. If they are updates to current docs, attach the label to auto cherrypick to the \`stable-website\` branch after merging."
curl -f -s -H "Authorization: token ${{ secrets.PR_COMMENT_TOKEN }}" \
curl -s -H "Authorization: token ${{ secrets.PR_COMMENT_TOKEN }}" \
-X POST \
-d "{ \"body\": \"${github_message}\"}" \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${{ github.event.pull_request.number }}/comments"


@ -7,6 +7,7 @@ linters:
- staticcheck
- ineffassign
- unparam
- forbidigo
issues:
# Disable the default exclude list so that all excludes are explicitly
@ -57,6 +58,14 @@ issues:
linters-settings:
gofmt:
simplify: true
forbidigo:
# Forbid the following identifiers (list of regexp).
forbid:
- '\brequire\.New\b(# Use package-level functions with explicit TestingT)?'
- '\bassert\.New\b(# Use package-level functions with explicit TestingT)?'
# Exclude godoc examples from forbidigo checks.
# Default: true
exclude_godoc_examples: false
run:
timeout: 10m
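Concretely, the new `forbidigo` rules reject the cached-assertion style and steer tests toward the package-level form with an explicit `TestingT`, for example:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExample(t *testing.T) {
	// Forbidden by the new rules:
	//   req := require.New(t)
	//   req.Equal(1, 1)
	//
	// Preferred: package-level functions with an explicit TestingT.
	require.Equal(t, 1, 1)
}
```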


@ -1,3 +1,58 @@
## 1.11.2 (January 12, 2022)
FEATURES:
* ingress: allow setting TLS min version and cipher suites in ingress gateway config entries [[GH-11576](https://github.com/hashicorp/consul/issues/11576)]
IMPROVEMENTS:
* api: Return 404 when de-registering a non-existent check [[GH-11950](https://github.com/hashicorp/consul/issues/11950)]
* connect: Add support for connecting to services behind a terminating gateway when using a transparent proxy. [[GH-12049](https://github.com/hashicorp/consul/issues/12049)]
* http: when a user attempts to access the UI but can't because it's disabled, explain this and how to fix it [[GH-11820](https://github.com/hashicorp/consul/issues/11820)]
* ui: Added a notice for non-primary intention creation [[GH-11985](https://github.com/hashicorp/consul/issues/11985)]
BUG FIXES:
* Mutate `NodeService` struct properly to avoid a data race. [[GH-11940](https://github.com/hashicorp/consul/issues/11940)]
* Upgrade to raft `1.3.3` which fixes a bug where a read replica node can trigger a raft election and become a leader. [[GH-11958](https://github.com/hashicorp/consul/issues/11958)]
* cli: Display assigned node identities in output of `consul acl token list`. [[GH-11926](https://github.com/hashicorp/consul/issues/11926)]
* cli: when creating a private key, save the file with mode 0600 so that only the user has read permission. [[GH-11781](https://github.com/hashicorp/consul/issues/11781)]
* config: include all config errors in the error message, previously some could be hidden. [[GH-11918](https://github.com/hashicorp/consul/issues/11918)]
* memberlist: fixes a bug which prevented members from joining a cluster with
large amounts of churn [[GH-253](https://github.com/hashicorp/memberlist/issues/253)] [[GH-12042](https://github.com/hashicorp/consul/issues/12042)]
* snapshot: the `snapshot save` command now saves the snapshot with read permission for only the current user. [[GH-11918](https://github.com/hashicorp/consul/issues/11918)]
* ui: Differentiate between Service Meta and Node Meta when choosing search fields
in Service Instance listings [[GH-11774](https://github.com/hashicorp/consul/issues/11774)]
* ui: Ensure login buttons appear for some error states, plus text amendments [[GH-11892](https://github.com/hashicorp/consul/issues/11892)]
* ui: Ensure partition query parameter is passed through to all OIDC related API
requests [[GH-11979](https://github.com/hashicorp/consul/issues/11979)]
* ui: Fix an issue where attempting to delete a policy from the policy detail page when
attached to a token would result in the delete button disappearing and no
deletion being attempted [[GH-11868](https://github.com/hashicorp/consul/issues/11868)]
* ui: Fixes a bug where proxy service health checks would sometimes not appear
until refresh [[GH-11903](https://github.com/hashicorp/consul/issues/11903)]
* ui: Fixes a bug with URL decoding within KV area [[GH-11931](https://github.com/hashicorp/consul/issues/11931)]
* ui: Fixes a visual issue with some border colors [[GH-11959](https://github.com/hashicorp/consul/issues/11959)]
* ui: Fixes an issue saving intentions when editing per service intentions [[GH-11937](https://github.com/hashicorp/consul/issues/11937)]
* ui: Fixes an issue where once a 403 page is displayed, in some circumstances it's
difficult to click back to where you were before receiving the 403 [[GH-11891](https://github.com/hashicorp/consul/issues/11891)]
* ui: Prevent disconnection notice appearing with auth change on certain pages [[GH-11905](https://github.com/hashicorp/consul/issues/11905)]
* ui: Temporarily remove KV pre-flight check for KV list permissions [[GH-11968](https://github.com/hashicorp/consul/issues/11968)]
* windows: Fixes a bug with empty log files when Consul is run as a Windows Service [[GH-11960](https://github.com/hashicorp/consul/issues/11960)]
* xds: fix a deadlock when the snapshot channel already has a snapshot to be consumed. [[GH-11924](https://github.com/hashicorp/consul/issues/11924)]
## 1.11.1 (December 15, 2021)
SECURITY:
* ci: Upgrade golang.org/x/net to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11854](https://github.com/hashicorp/consul/issues/11854)]
FEATURES:
* **Admin Partitions (Consul Enterprise only)** This version adds admin partitions, a new entity defining administrative and networking boundaries within a Consul deployment. For more information refer to the
[Admin Partition](https://www.consul.io/docs/enterprise/admin-partitions) documentation. [[GH-11855](https://github.com/hashicorp/consul/issues/11855)]
* networking: **(Enterprise Only)** Make `segment_limit` configurable, cap at 256.
## 1.11.0 (December 14, 2021)
BREAKING CHANGES:
@ -121,6 +176,61 @@ NOTES:
* Renamed the `agent_master` field to `agent_recovery` in the `acl-tokens.json` file in which tokens are persisted on-disk (when `acl.enable_token_persistence` is enabled) [[GH-11744](https://github.com/hashicorp/consul/issues/11744)]
## 1.10.7 (January 12, 2022)
SECURITY:
* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
FEATURES:
* ui: Adds visible Consul version information [[GH-11803](https://github.com/hashicorp/consul/issues/11803)]
BUG FIXES:
* Mutate `NodeService` struct properly to avoid a data race. [[GH-11940](https://github.com/hashicorp/consul/issues/11940)]
* Upgrade to raft `1.3.3` which fixes a bug where a read replica node can trigger a raft election and become a leader. [[GH-11958](https://github.com/hashicorp/consul/issues/11958)]
* ca: fixes a bug that caused non blocking leaf cert queries to return the same cached response regardless of ca rotation or leaf cert expiry [[GH-11693](https://github.com/hashicorp/consul/issues/11693)]
* ca: fixes a bug that caused the SigningKeyID to be wrong in the primary DC, when the Vault provider is used, after a CA config creates a new root. [[GH-11672](https://github.com/hashicorp/consul/issues/11672)]
* ca: fixes a bug that caused the intermediate cert used to sign leaf certs to be missing from the /connect/ca/roots API response when the Vault provider was used. [[GH-11671](https://github.com/hashicorp/consul/issues/11671)]
* cli: Display assigned node identities in output of `consul acl token list`. [[GH-11926](https://github.com/hashicorp/consul/issues/11926)]
* cli: when creating a private key, save the file with mode 0600 so that only the user has read permission. [[GH-11781](https://github.com/hashicorp/consul/issues/11781)]
* snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files
* structs: **(Enterprise only)** Remove partition field parsing from 1.10 to prevent further 1.11 upgrade compatibility issues.
* ui: Differentiate between Service Meta and Node Meta when choosing search fields
in Service Instance listings [[GH-11774](https://github.com/hashicorp/consul/issues/11774)]
* ui: Ensure we show a readonly designed page for readonly intentions [[GH-11767](https://github.com/hashicorp/consul/issues/11767)]
* ui: Fix an issue where attempting to delete a policy from the policy detail page when
attached to a token would result in the delete button disappearing and no
deletion being attempted [[GH-11868](https://github.com/hashicorp/consul/issues/11868)]
* ui: Fix visual issue with slight table header overflow [[GH-11670](https://github.com/hashicorp/consul/issues/11670)]
* ui: Fixes an issue where once a 403 page is displayed, in some circumstances it's
difficult to click back to where you were before receiving the 403 [[GH-11891](https://github.com/hashicorp/consul/issues/11891)]
* ui: Fixes an issue where, under some circumstances, the data loaded before
logging in was still presented after logging in. [[GH-11681](https://github.com/hashicorp/consul/issues/11681)]
* ui: Include `Service.Namespace` into available variables for `dashboard_url_templates` [[GH-11640](https://github.com/hashicorp/consul/issues/11640)]
* ui: Revert to depending on the backend, 'post-user-action', to report
permissions errors rather than using UI capabilities 'pre-user-action' [[GH-11520](https://github.com/hashicorp/consul/issues/11520)]
* ui: Temporarily remove KV pre-flight check for KV list permissions [[GH-11968](https://github.com/hashicorp/consul/issues/11968)]
* windows: Fixes a bug with empty log files when Consul is run as a Windows Service [[GH-11960](https://github.com/hashicorp/consul/issues/11960)]
* xds: fix a deadlock when the snapshot channel already has a snapshot to be consumed. [[GH-11924](https://github.com/hashicorp/consul/issues/11924)]
## 1.10.6 (December 15, 2021)
SECURITY:
* ci: Upgrade golang.org/x/net to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11856](https://github.com/hashicorp/consul/issues/11856)]
## 1.10.5 (December 13, 2021)
SECURITY:
* ci: Upgrade to Go 1.16.12 to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11808](https://github.com/hashicorp/consul/issues/11808)]
BUG FIXES:
* agent: **(Enterprise only)** fix bug where 1.10.x agents would deregister serf checks from 1.11.x servers [[GH-11700](https://github.com/hashicorp/consul/issues/11700)]
## 1.10.4 (November 11, 2021)
SECURITY:
@ -389,6 +499,41 @@ NOTES:
* legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive
## 1.9.14 (January 12, 2022)
SECURITY:
* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
BUG FIXES:
* ca: fixes a bug that caused non blocking leaf cert queries to return the same cached response regardless of ca rotation or leaf cert expiry [[GH-11693](https://github.com/hashicorp/consul/issues/11693)]
* ca: fixes a bug that caused the intermediate cert used to sign leaf certs to be missing from the /connect/ca/roots API response when the Vault provider was used. [[GH-11671](https://github.com/hashicorp/consul/issues/11671)]
* cli: Display assigned node identities in output of `consul acl token list`. [[GH-11926](https://github.com/hashicorp/consul/issues/11926)]
* cli: when creating a private key, save the file with mode 0600 so that only the user has read permission. [[GH-11781](https://github.com/hashicorp/consul/issues/11781)]
* snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files
* ui: Differentiate between Service Meta and Node Meta when choosing search fields
in Service Instance listings [[GH-11774](https://github.com/hashicorp/consul/issues/11774)]
* ui: Fixes an issue where, under some circumstances, the data loaded before
logging in was still presented after logging in. [[GH-11681](https://github.com/hashicorp/consul/issues/11681)]
* ui: Fixes an issue where under some circumstances the namespace selector could
become 'stuck' on the default namespace [[GH-11830](https://github.com/hashicorp/consul/issues/11830)]
* ui: Include `Service.Namespace` into available variables for `dashboard_url_templates` [[GH-11640](https://github.com/hashicorp/consul/issues/11640)]
* ui: Prevent disconnection notice appearing with auth change on certain pages [[GH-11905](https://github.com/hashicorp/consul/issues/11905)]
* xds: fix a deadlock when the snapshot channel already has a snapshot to be consumed. [[GH-11924](https://github.com/hashicorp/consul/issues/11924)]
## 1.9.13 (December 15, 2021)
SECURITY:
* ci: Upgrade golang.org/x/net to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11858](https://github.com/hashicorp/consul/issues/11858)]
## 1.9.12 (December 13, 2021)
SECURITY:
* ci: Upgrade to Go 1.16.12 to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11807](https://github.com/hashicorp/consul/issues/11807)]
## 1.9.11 (November 11, 2021)
SECURITY:
@ -769,6 +914,26 @@ BUG FIXES:
* telemetry: fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area` [[GH-8685](https://github.com/hashicorp/consul/issues/8685)]
* ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)]
## 1.8.19 (December 15, 2021)
SECURITY:
* ci: Upgrade golang.org/x/net to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11857](https://github.com/hashicorp/consul/issues/11857)]
## 1.8.18 (December 13, 2021)
SECURITY:
* ci: Upgrade to Go 1.16.12 to address [CVE-2021-44716](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44716) [[GH-11806](https://github.com/hashicorp/consul/issues/11806)]
* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805.
BUG FIXES:
* snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files
* ui: Fixes an issue where, under some circumstances, the data loaded before
logging in was still presented after logging in. [[GH-11681](https://github.com/hashicorp/consul/issues/11681)]
* ui: Include `Service.Namespace` into available variables for `dashboard_url_templates` [[GH-11640](https://github.com/hashicorp/consul/issues/11640)]
## 1.8.17 (November 11, 2021)
SECURITY:


@ -1516,30 +1516,28 @@ func TestMergePolicies(t *testing.T) {
},
}
req := require.New(t)
for _, tcase := range tests {
t.Run(tcase.name, func(t *testing.T) {
act := MergePolicies(tcase.input)
exp := tcase.expected
req.Equal(exp.ACL, act.ACL)
req.Equal(exp.Keyring, act.Keyring)
req.Equal(exp.Operator, act.Operator)
req.Equal(exp.Mesh, act.Mesh)
req.ElementsMatch(exp.Agents, act.Agents)
req.ElementsMatch(exp.AgentPrefixes, act.AgentPrefixes)
req.ElementsMatch(exp.Events, act.Events)
req.ElementsMatch(exp.EventPrefixes, act.EventPrefixes)
req.ElementsMatch(exp.Keys, act.Keys)
req.ElementsMatch(exp.KeyPrefixes, act.KeyPrefixes)
req.ElementsMatch(exp.Nodes, act.Nodes)
req.ElementsMatch(exp.NodePrefixes, act.NodePrefixes)
req.ElementsMatch(exp.PreparedQueries, act.PreparedQueries)
req.ElementsMatch(exp.PreparedQueryPrefixes, act.PreparedQueryPrefixes)
req.ElementsMatch(exp.Services, act.Services)
req.ElementsMatch(exp.ServicePrefixes, act.ServicePrefixes)
req.ElementsMatch(exp.Sessions, act.Sessions)
req.ElementsMatch(exp.SessionPrefixes, act.SessionPrefixes)
require.Equal(t, exp.ACL, act.ACL)
require.Equal(t, exp.Keyring, act.Keyring)
require.Equal(t, exp.Operator, act.Operator)
require.Equal(t, exp.Mesh, act.Mesh)
require.ElementsMatch(t, exp.Agents, act.Agents)
require.ElementsMatch(t, exp.AgentPrefixes, act.AgentPrefixes)
require.ElementsMatch(t, exp.Events, act.Events)
require.ElementsMatch(t, exp.EventPrefixes, act.EventPrefixes)
require.ElementsMatch(t, exp.Keys, act.Keys)
require.ElementsMatch(t, exp.KeyPrefixes, act.KeyPrefixes)
require.ElementsMatch(t, exp.Nodes, act.Nodes)
require.ElementsMatch(t, exp.NodePrefixes, act.NodePrefixes)
require.ElementsMatch(t, exp.PreparedQueries, act.PreparedQueries)
require.ElementsMatch(t, exp.PreparedQueryPrefixes, act.PreparedQueryPrefixes)
require.ElementsMatch(t, exp.Services, act.Services)
require.ElementsMatch(t, exp.ServicePrefixes, act.ServicePrefixes)
require.ElementsMatch(t, exp.Sessions, act.Sessions)
require.ElementsMatch(t, exp.SessionPrefixes, act.SessionPrefixes)
})
}


@ -84,7 +84,13 @@ func (a *Agent) vetServiceUpdateWithAuthorizer(authz acl.Authorizer, serviceID s
structs.ServiceIDString(existing.Service, &existing.EnterpriseMeta))
}
} else {
return NotFoundError{Reason: fmt.Sprintf("Unknown service %q", serviceID)}
// Take care if modifying this error message.
// agent/local/state.go's deleteService assumes the Catalog.Deregister RPC call
// will include "Unknown service"in the error if deregistration fails due to a
// service with that ID not existing.
return NotFoundError{Reason: fmt.Sprintf(
"Unknown service ID %q. Ensure that the service ID is passed, not the service name.",
serviceID)}
}
return nil
@ -143,7 +149,9 @@ func (a *Agent) vetCheckUpdateWithAuthorizer(authz acl.Authorizer, checkID struc
}
}
} else {
return fmt.Errorf("Unknown check %q", checkID.String())
return NotFoundError{Reason: fmt.Sprintf(
"Unknown check ID %q. Ensure that the check ID is passed, not the check name.",
checkID.String())}
}
return nil
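
A minimal sketch of the coupling that comment warns about: the local state sync treats deregistration as already-done only when the returned error still contains "Unknown service" (the helper below is illustrative, not the actual `state.go` code):

```go
package example

import "strings"

// isServiceAlreadyGone mirrors the substring check the comment above
// refers to; if the error message stopped containing "Unknown service",
// callers like deleteService would stop recognizing this case.
func isServiceAlreadyGone(err error) bool {
	return err != nil && strings.Contains(err.Error(), "Unknown service")
}
```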


@ -16,23 +16,22 @@ type aclBootstrapResponse struct {
structs.ACLToken
}
var aclDisabled = UnauthorizedError{Reason: "ACL support disabled"}
// checkACLDisabled will return a standard response if ACLs are disabled. This
// returns true if they are disabled and we should not continue.
func (s *HTTPHandlers) checkACLDisabled(resp http.ResponseWriter, _req *http.Request) bool {
func (s *HTTPHandlers) checkACLDisabled() bool {
if s.agent.config.ACLsEnabled {
return false
}
resp.WriteHeader(http.StatusUnauthorized)
fmt.Fprint(resp, "ACL support disabled")
return true
}
// ACLBootstrap is used to perform a one-time ACL bootstrap operation on
// a cluster to get the first management token.
func (s *HTTPHandlers) ACLBootstrap(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
args := structs.DCSpecificRequest{
@ -42,9 +41,7 @@ func (s *HTTPHandlers) ACLBootstrap(resp http.ResponseWriter, req *http.Request)
err := s.agent.RPC("ACL.BootstrapTokens", &args, &out)
if err != nil {
if strings.Contains(err.Error(), structs.ACLBootstrapNotAllowedErr.Error()) {
resp.WriteHeader(http.StatusForbidden)
fmt.Fprint(resp, acl.PermissionDeniedError{Cause: err.Error()}.Error())
return nil, nil
return nil, acl.PermissionDeniedError{Cause: err.Error()}
} else {
return nil, err
}
@ -53,8 +50,8 @@ func (s *HTTPHandlers) ACLBootstrap(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) ACLReplicationStatus(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
// Note that we do not forward to the ACL DC here. This is a query for
@ -74,8 +71,8 @@ func (s *HTTPHandlers) ACLReplicationStatus(resp http.ResponseWriter, req *http.
}
func (s *HTTPHandlers) ACLPolicyList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var args structs.ACLPolicyListRequest
@ -105,8 +102,8 @@ func (s *HTTPHandlers) ACLPolicyList(resp http.ResponseWriter, req *http.Request
}
func (s *HTTPHandlers) ACLPolicyCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var fn func(resp http.ResponseWriter, req *http.Request, policyID string) (interface{}, error)
@ -125,7 +122,10 @@ func (s *HTTPHandlers) ACLPolicyCRUD(resp http.ResponseWriter, req *http.Request
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
policyID := strings.TrimPrefix(req.URL.Path, "/v1/acl/policy/")
policyID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/policy/")
if err != nil {
return nil, err
}
if policyID == "" && req.Method != "PUT" {
return nil, BadRequestError{Reason: "Missing policy ID"}
}
@ -166,11 +166,14 @@ func (s *HTTPHandlers) ACLPolicyRead(resp http.ResponseWriter, req *http.Request
}
func (s *HTTPHandlers) ACLPolicyReadByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
policyName := strings.TrimPrefix(req.URL.Path, "/v1/acl/policy/name/")
policyName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/policy/name/")
if err != nil {
return nil, err
}
if policyName == "" {
return nil, BadRequestError{Reason: "Missing policy Name"}
}
@ -183,8 +186,8 @@ func (s *HTTPHandlers) ACLPolicyReadByID(resp http.ResponseWriter, req *http.Req
}
func (s *HTTPHandlers) ACLPolicyCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
return s.aclPolicyWriteInternal(resp, req, "", true)
@ -248,8 +251,8 @@ func (s *HTTPHandlers) ACLPolicyDelete(resp http.ResponseWriter, req *http.Reque
}
func (s *HTTPHandlers) ACLTokenList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
args := &structs.ACLTokenListRequest{
@ -285,8 +288,8 @@ func (s *HTTPHandlers) ACLTokenList(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var fn func(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error)
@ -305,7 +308,10 @@ func (s *HTTPHandlers) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request)
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
tokenID := strings.TrimPrefix(req.URL.Path, "/v1/acl/token/")
tokenID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/token/")
if err != nil {
return nil, err
}
if strings.HasSuffix(tokenID, "/clone") && req.Method == "PUT" {
tokenID = tokenID[:len(tokenID)-6]
fn = s.ACLTokenClone
@ -318,8 +324,8 @@ func (s *HTTPHandlers) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) ACLTokenSelf(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
args := structs.ACLTokenGetRequest{
@ -351,8 +357,8 @@ func (s *HTTPHandlers) ACLTokenSelf(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) ACLTokenCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
return s.aclTokenSetInternal(req, "", true)
@ -442,8 +448,8 @@ func (s *HTTPHandlers) ACLTokenDelete(resp http.ResponseWriter, req *http.Reques
}
func (s *HTTPHandlers) ACLTokenClone(resp http.ResponseWriter, req *http.Request, tokenID string) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
args := structs.ACLTokenSetRequest{
@ -471,8 +477,8 @@ func (s *HTTPHandlers) ACLTokenClone(resp http.ResponseWriter, req *http.Request
}
func (s *HTTPHandlers) ACLRoleList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var args structs.ACLRoleListRequest
@ -504,8 +510,8 @@ func (s *HTTPHandlers) ACLRoleList(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var fn func(resp http.ResponseWriter, req *http.Request, roleID string) (interface{}, error)
@ -524,7 +530,10 @@ func (s *HTTPHandlers) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request)
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
roleID := strings.TrimPrefix(req.URL.Path, "/v1/acl/role/")
roleID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/role/")
if err != nil {
return nil, err
}
if roleID == "" && req.Method != "PUT" {
return nil, BadRequestError{Reason: "Missing role ID"}
}
@ -533,11 +542,14 @@ func (s *HTTPHandlers) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) ACLRoleReadByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
roleName := strings.TrimPrefix(req.URL.Path, "/v1/acl/role/name/")
roleName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/role/name/")
if err != nil {
return nil, err
}
if roleName == "" {
return nil, BadRequestError{Reason: "Missing role Name"}
}
@ -581,8 +593,8 @@ func (s *HTTPHandlers) ACLRoleRead(resp http.ResponseWriter, req *http.Request,
}
func (s *HTTPHandlers) ACLRoleCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
return s.ACLRoleWrite(resp, req, "")
@ -634,8 +646,8 @@ func (s *HTTPHandlers) ACLRoleDelete(resp http.ResponseWriter, req *http.Request
}
func (s *HTTPHandlers) ACLBindingRuleList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var args structs.ACLBindingRuleListRequest
@ -668,8 +680,8 @@ func (s *HTTPHandlers) ACLBindingRuleList(resp http.ResponseWriter, req *http.Re
}
func (s *HTTPHandlers) ACLBindingRuleCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var fn func(resp http.ResponseWriter, req *http.Request, bindingRuleID string) (interface{}, error)
@ -688,7 +700,10 @@ func (s *HTTPHandlers) ACLBindingRuleCRUD(resp http.ResponseWriter, req *http.Re
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
bindingRuleID := strings.TrimPrefix(req.URL.Path, "/v1/acl/binding-rule/")
bindingRuleID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/binding-rule/")
if err != nil {
return nil, err
}
if bindingRuleID == "" && req.Method != "PUT" {
return nil, BadRequestError{Reason: "Missing binding rule ID"}
}
@ -728,8 +743,8 @@ func (s *HTTPHandlers) ACLBindingRuleRead(resp http.ResponseWriter, req *http.Re
}
func (s *HTTPHandlers) ACLBindingRuleCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
return s.ACLBindingRuleWrite(resp, req, "")
@ -781,8 +796,8 @@ func (s *HTTPHandlers) ACLBindingRuleDelete(resp http.ResponseWriter, req *http.
}
func (s *HTTPHandlers) ACLAuthMethodList(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var args structs.ACLAuthMethodListRequest
@ -812,8 +827,8 @@ func (s *HTTPHandlers) ACLAuthMethodList(resp http.ResponseWriter, req *http.Req
}
func (s *HTTPHandlers) ACLAuthMethodCRUD(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
var fn func(resp http.ResponseWriter, req *http.Request, methodName string) (interface{}, error)
@ -832,7 +847,10 @@ func (s *HTTPHandlers) ACLAuthMethodCRUD(resp http.ResponseWriter, req *http.Req
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
methodName := strings.TrimPrefix(req.URL.Path, "/v1/acl/auth-method/")
methodName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/auth-method/")
if err != nil {
return nil, err
}
if methodName == "" && req.Method != "PUT" {
return nil, BadRequestError{Reason: "Missing auth method name"}
}
@ -872,8 +890,8 @@ func (s *HTTPHandlers) ACLAuthMethodRead(resp http.ResponseWriter, req *http.Req
}
func (s *HTTPHandlers) ACLAuthMethodCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
return s.ACLAuthMethodWrite(resp, req, "")
@ -928,8 +946,8 @@ func (s *HTTPHandlers) ACLAuthMethodDelete(resp http.ResponseWriter, req *http.R
}
func (s *HTTPHandlers) ACLLogin(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
args := &structs.ACLLoginRequest{
@ -954,8 +972,8 @@ func (s *HTTPHandlers) ACLLogin(resp http.ResponseWriter, req *http.Request) (in
}
func (s *HTTPHandlers) ACLLogout(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
args := structs.ACLLogoutRequest{
@ -1014,8 +1032,8 @@ func (s *HTTPHandlers) ACLAuthorize(resp http.ResponseWriter, req *http.Request)
// policy.
const maxRequests = 64
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, aclDisabled
}
request := structs.RemoteACLAuthorizationRequest{
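
Why returning `aclDisabled` is enough: the shared HTTP wrapper (not shown in this diff) maps returned error types onto status codes, so handlers no longer write to the `ResponseWriter` themselves. A hypothetical sketch of that mapping, with the error type mirrored from the handlers above:

```go
package example

import (
	"errors"
	"net/http"
)

type UnauthorizedError struct{ Reason string }

func (e UnauthorizedError) Error() string { return e.Reason }

// statusForError is an assumption about the wrapper's behavior, shown
// only to explain the refactor: typed errors choose the HTTP status.
func statusForError(err error) int {
	var unauthorized UnauthorizedError
	switch {
	case err == nil:
		return http.StatusOK
	case errors.As(err, &unauthorized):
		return http.StatusUnauthorized
	default:
		return http.StatusInternalServerError
	}
}
```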


@ -70,10 +70,8 @@ func TestACL_Disabled_Response(t *testing.T) {
req, _ := http.NewRequest("PUT", "/should/not/care", nil)
resp := httptest.NewRecorder()
obj, err := tt.fn(resp, req)
require.NoError(t, err)
require.Nil(t, obj)
require.Equal(t, http.StatusUnauthorized, resp.Code)
require.Contains(t, resp.Body.String(), "ACL support disabled")
require.ErrorIs(t, err, UnauthorizedError{Reason: "ACL support disabled"})
})
}
}
@ -119,9 +117,6 @@ func TestACL_Bootstrap(t *testing.T) {
if tt.token && err != nil {
t.Fatalf("err: %v", err)
}
if got, want := resp.Code, tt.code; got != want {
t.Fatalf("got %d want %d", got, want)
}
if tt.token {
wrap, ok := out.(*aclBootstrapResponse)
if !ok {
@ -854,10 +849,10 @@ func TestACL_HTTP(t *testing.T) {
tokens, ok := raw.(structs.ACLTokenListStubs)
require.True(t, ok)
// 3 tokens created but 1 was deleted + master token + anon token
// 3 tokens created but 1 was deleted + initial management token + anon token
require.Len(t, tokens, 4)
// this loop doesn't verify anything about the master token
// this loop doesn't verify anything about the initial management token
for tokenID, expected := range tokenMap {
found := false
for _, actual := range tokens {
@ -1885,7 +1880,7 @@ func TestACL_Authorize(t *testing.T) {
var localToken structs.ACLToken
require.NoError(t, a2.RPC("ACL.TokenSet", &localTokenReq, &localToken))
t.Run("master-token", func(t *testing.T) {
t.Run("initial-management-token", func(t *testing.T) {
request := []structs.ACLAuthorizationRequest{
{
Resource: "acl",
@ -2021,7 +2016,7 @@ func TestACL_Authorize(t *testing.T) {
resp := responses[idx]
require.Equal(t, req, resp.ACLAuthorizationRequest)
require.True(t, resp.Allow, "should have allowed all access for master token")
require.True(t, resp.Allow, "should have allowed all access for initial management token")
}
})
}
@ -2282,7 +2277,7 @@ func TestACL_Authorize(t *testing.T) {
type rpcFn func(string, interface{}, interface{}) error
func upsertTestCustomizedAuthMethod(
rpc rpcFn, masterToken string, datacenter string,
rpc rpcFn, initialManagementToken string, datacenter string,
modify func(method *structs.ACLAuthMethod),
) (*structs.ACLAuthMethod, error) {
name, err := uuid.GenerateUUID()
@ -2296,7 +2291,7 @@ func upsertTestCustomizedAuthMethod(
Name: "test-method-" + name,
Type: "testing",
},
WriteRequest: structs.WriteRequest{Token: masterToken},
WriteRequest: structs.WriteRequest{Token: initialManagementToken},
}
if modify != nil {
@ -2313,11 +2308,11 @@ func upsertTestCustomizedAuthMethod(
return &out, nil
}
func upsertTestCustomizedBindingRule(rpc rpcFn, masterToken string, datacenter string, modify func(rule *structs.ACLBindingRule)) (*structs.ACLBindingRule, error) {
func upsertTestCustomizedBindingRule(rpc rpcFn, initialManagementToken string, datacenter string, modify func(rule *structs.ACLBindingRule)) (*structs.ACLBindingRule, error) {
req := structs.ACLBindingRuleSetRequest{
Datacenter: datacenter,
BindingRule: structs.ACLBindingRule{},
WriteRequest: structs.WriteRequest{Token: masterToken},
WriteRequest: structs.WriteRequest{Token: initialManagementToken},
}
if modify != nil {


@ -209,9 +209,6 @@ type Agent struct {
// depending on the configuration
delegate delegate
// aclMasterAuthorizer is an object that helps manage local ACL enforcement.
aclMasterAuthorizer acl.Authorizer
// state stores a local representation of the node,
// services and checks. Used for anti-entropy.
State *local.State


@ -155,9 +155,11 @@ func (s *HTTPHandlers) AgentMetrics(resp http.ResponseWriter, req *http.Request)
}
if enablePrometheusOutput(req) {
if s.agent.config.Telemetry.PrometheusOpts.Expiration < 1 {
resp.WriteHeader(http.StatusUnsupportedMediaType)
fmt.Fprint(resp, "Prometheus is not enabled since its retention time is not positive")
return nil, nil
return nil, CodeWithPayloadError{
StatusCode: http.StatusUnsupportedMediaType,
Reason: "Prometheus is not enabled since its retention time is not positive",
ContentType: "text/plain",
}
}
handlerOptions := promhttp.HandlerOpts{
ErrorLog: s.agent.logger.StandardLogger(&hclog.StandardLoggerOptions{
@ -379,7 +381,10 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
// blocking watch using hash-based blocking.
func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Get the proxy ID. Note that this is the ID of a proxy's service instance.
id := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/")
id, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/service/")
if err != nil {
return nil, err
}
// Maybe block
var queryOpts structs.QueryOptions
@ -398,7 +403,7 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
}
// need to resolve to default the meta
_, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, nil)
_, err = s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, nil)
if err != nil {
return nil, err
}
@ -420,9 +425,7 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
svcState := s.agent.State.ServiceState(sid)
if svcState == nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprintf(resp, "unknown service ID: %s", sid.String())
return "", nil, nil
return "", nil, NotFoundError{Reason: fmt.Sprintf("unknown service ID: %s", sid.String())}
}
svc := svcState.Service
@ -552,9 +555,7 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request)
// key are ok, otherwise the argument doesn't apply to
// the WAN.
default:
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Cannot provide a segment with wan=true")
return nil, nil
return nil, BadRequestError{Reason: "Cannot provide a segment with wan=true"}
}
}
@ -637,7 +638,10 @@ func (s *HTTPHandlers) AgentJoin(resp http.ResponseWriter, req *http.Request) (i
}
// Get the address
addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/join/")
addr, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/join/")
if err != nil {
return nil, err
}
if wan {
if s.agent.config.ConnectMeshGatewayWANFederationEnabled {
@ -697,7 +701,10 @@ func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Reque
// Check if the WAN is being queried
_, wan := req.URL.Query()["wan"]
addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/force-leave/")
addr, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/force-leave/")
if err != nil {
return nil, err
}
if wan {
return nil, s.agent.ForceLeaveWAN(addr, prune, entMeta)
} else {
@ -724,16 +731,16 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
}
if err := decodeBody(req.Body, &args); err != nil {
return nil, BadRequestError{fmt.Sprintf("Request decode failed: %v", err)}
return nil, BadRequestError{Reason: fmt.Sprintf("Request decode failed: %v", err)}
}
// Verify the check has a name.
if args.Name == "" {
return nil, BadRequestError{"Missing check name"}
return nil, BadRequestError{Reason: "Missing check name"}
}
if args.Status != "" && !structs.ValidStatus(args.Status) {
return nil, BadRequestError{"Bad check status"}
return nil, BadRequestError{Reason: "Bad check status"}
}
authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &args.EnterpriseMeta, nil)
@ -752,15 +759,15 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
chkType := args.CheckType()
err = chkType.Validate()
if err != nil {
return nil, BadRequestError{fmt.Sprintf("Invalid check: %v", err)}
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid check: %v", err)}
}
// Store the type of check based on the definition
health.Type = chkType.Type()
if health.ServiceID != "" {
cid := health.CompoundServiceID()
// fixup the service name so that vetCheckRegister requires the right ACLs
cid := health.CompoundServiceID()
service := s.agent.State.Service(cid)
if service != nil {
health.ServiceName = service.Service
@ -783,7 +790,11 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
}
func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := structs.NewCheckID(types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/")), nil)
ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/deregister/")
if err != nil {
return nil, err
}
checkID := structs.NewCheckID(types.CheckID(ID), nil)
// Get the provided token, if any, and vet against any ACL policies.
var token string
@ -816,13 +827,21 @@ func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http.
}
func (s *HTTPHandlers) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/pass/"))
ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/pass/")
if err != nil {
return nil, err
}
checkID := types.CheckID(ID)
note := req.URL.Query().Get("note")
return s.agentCheckUpdate(resp, req, checkID, api.HealthPassing, note)
}
func (s *HTTPHandlers) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/warn/"))
ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/warn/")
if err != nil {
return nil, err
}
checkID := types.CheckID(ID)
note := req.URL.Query().Get("note")
return s.agentCheckUpdate(resp, req, checkID, api.HealthWarning, note)
@ -830,7 +849,11 @@ func (s *HTTPHandlers) AgentCheckWarn(resp http.ResponseWriter, req *http.Reques
}
func (s *HTTPHandlers) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/fail/"))
ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/fail/")
if err != nil {
return nil, err
}
checkID := types.CheckID(ID)
note := req.URL.Query().Get("note")
return s.agentCheckUpdate(resp, req, checkID, api.HealthCritical, note)
@ -854,9 +877,7 @@ type checkUpdate struct {
func (s *HTTPHandlers) AgentCheckUpdate(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
var update checkUpdate
if err := decodeBody(req.Body, &update); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Request decode failed: %v", err)
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Request decode failed: %v", err)}
}
switch update.Status {
@ -864,12 +885,15 @@ func (s *HTTPHandlers) AgentCheckUpdate(resp http.ResponseWriter, req *http.Requ
case api.HealthWarning:
case api.HealthCritical:
default:
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Invalid check status: '%s'", update.Status)
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid check status: '%s'", update.Status)}
}
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/update/"))
ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/update/")
if err != nil {
return nil, err
}
checkID := types.CheckID(ID)
return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output)
}
@ -951,7 +975,10 @@ func returnTextPlain(req *http.Request) bool {
// AgentHealthServiceByID return the local Service Health given its ID
func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Pull out the service id (service id since there may be several instance of the same service on this host)
serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/id/")
serviceID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/health/service/id/")
if err != nil {
return nil, err
}
if serviceID == "" {
return nil, &BadRequestError{Reason: "Missing serviceID"}
}
@ -1009,7 +1036,11 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
// AgentHealthServiceByName return the worse status of all the services with given name on an agent
func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Pull out the service name
serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/name/")
serviceName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/health/service/name/")
if err != nil {
return nil, err
}
if serviceName == "" {
return nil, &BadRequestError{Reason: "Missing service Name"}
}
@ -1082,24 +1113,18 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
}
if err := decodeBody(req.Body, &args); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Request decode failed: %v", err)
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Request decode failed: %v", err)}
}
// Verify the service has a name.
if args.Name == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing service name")
return nil, nil
return nil, BadRequestError{Reason: "Missing service name"}
}
// Check the service address here and in the catalog RPC endpoint
// since service registration isn't synchronous.
if ipaddr.IsAny(args.Address) {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Invalid service address")
return nil, nil
return nil, BadRequestError{Reason: "Invalid service address"}
}
var token string
@ -1118,37 +1143,27 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
ns := args.NodeService()
if ns.Weights != nil {
if err := structs.ValidateWeights(ns.Weights); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, fmt.Errorf("Invalid Weights: %v", err))
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid Weights: %v", err)}
}
}
if err := structs.ValidateServiceMetadata(ns.Kind, ns.Meta, false); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, fmt.Errorf("Invalid Service Meta: %v", err))
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid Service Meta: %v", err)}
}
// Run validation. This is the same validation that would happen on
// the catalog endpoint so it helps ensure the sync will work properly.
if err := ns.Validate(); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, err.Error())
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Validation failed: %v", err.Error())}
}
// Verify the check type.
chkTypes, err := args.CheckTypes()
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, fmt.Errorf("Invalid check: %v", err))
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid check: %v", err)}
}
for _, check := range chkTypes {
if check.Status != "" && !structs.ValidStatus(check.Status) {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Status for checks must 'passing', 'warning', 'critical'")
return nil, nil
return nil, BadRequestError{Reason: "Status for checks must 'passing', 'warning', 'critical'"}
}
}
@ -1182,9 +1197,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
}
if sidecar != nil {
if err := sidecar.Validate(); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, err.Error())
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Failed Validation: %v", err.Error())}
}
// Make sure we are allowed to register the sidecar using the token
// specified (might be specific to sidecar or the same one as the overall
@ -1237,7 +1250,12 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
}
func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/"), nil)
serviceID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/service/deregister/")
if err != nil {
return nil, err
}
sid := structs.NewServiceID(serviceID, nil)
// Get the provided token, if any, and vet against any ACL policies.
var token string
@ -1272,28 +1290,27 @@ func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *htt
func (s *HTTPHandlers) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Ensure we have a service ID
sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/"), nil)
serviceID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/service/maintenance/")
if err != nil {
return nil, err
}
sid := structs.NewServiceID(serviceID, nil)
if sid.ID == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing service ID")
return nil, nil
return nil, BadRequestError{Reason: "Missing service ID"}
}
// Ensure we have some action
params := req.URL.Query()
if _, ok := params["enable"]; !ok {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing value for enable")
return nil, nil
return nil, BadRequestError{Reason: "Missing value for enable"}
}
raw := params.Get("enable")
enable, err := strconv.ParseBool(raw)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Invalid value for enable: %q", raw)
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid value for enable: %q", raw)}
}
// Get the provided token, if any, and vet against any ACL policies.
@ -1322,15 +1339,11 @@ func (s *HTTPHandlers) AgentServiceMaintenance(resp http.ResponseWriter, req *ht
if enable {
reason := params.Get("reason")
if err = s.agent.EnableServiceMaintenance(sid, reason, token); err != nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprint(resp, err.Error())
return nil, nil
return nil, NotFoundError{Reason: err.Error()}
}
} else {
if err = s.agent.DisableServiceMaintenance(sid); err != nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprint(resp, err.Error())
return nil, nil
return nil, NotFoundError{Reason: err.Error()}
}
}
s.syncChanges()
@ -1341,17 +1354,13 @@ func (s *HTTPHandlers) AgentNodeMaintenance(resp http.ResponseWriter, req *http.
// Ensure we have some action
params := req.URL.Query()
if _, ok := params["enable"]; !ok {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing value for enable")
return nil, nil
return nil, BadRequestError{Reason: "Missing value for enable"}
}
raw := params.Get("enable")
enable, err := strconv.ParseBool(raw)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Invalid value for enable: %q", raw)
return nil, nil
return nil, BadRequestError{Reason: fmt.Sprintf("Invalid value for enable: %q", raw)}
}
// Get the provided token, if any, and vet against any ACL policies.
@ -1458,8 +1467,8 @@ func (s *HTTPHandlers) AgentMonitor(resp http.ResponseWriter, req *http.Request)
}
func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.checkACLDisabled(resp, req) {
return nil, nil
if s.checkACLDisabled() {
return nil, UnauthorizedError{Reason: "ACL support disabled"}
}
// Fetch the ACL token, if any, and enforce agent policy.
@ -1485,7 +1494,10 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) (
}
// Figure out the target token.
target := strings.TrimPrefix(req.URL.Path, "/v1/agent/token/")
target, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/token/")
if err != nil {
return nil, err
}
err = s.agent.tokens.WithPersistenceLock(func() error {
triggerAntiEntropySync := false
@ -1558,7 +1570,10 @@ func (s *HTTPHandlers) AgentConnectCARoots(resp http.ResponseWriter, req *http.R
func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Get the service name. Note that this is the name of the service,
// not the ID of the service instance.
serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")
serviceName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/connect/ca/leaf/")
if err != nil {
return nil, err
}
args := cachetype.ConnectCALeafRequest{
Service: serviceName, // Need name not ID

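The agent endpoint hunks above follow two recurring patterns in this commit: handlers return typed errors (`BadRequestError`, `NotFoundError`, `UnauthorizedError`) instead of writing status codes directly to the `ResponseWriter`, and path parameters are read through `getPathSuffixUnescaped` so that URL-encoded service, check, and token names round-trip correctly. As a minimal sketch of what that helper and error type could look like — names mirror the diff, but the real Consul definitions may differ in detail:

```go
// Sketch only — illustrative, not the actual Consul source.
package agent

import (
	"fmt"
	"net/url"
	"strings"
)

// BadRequestError lets a handler signal an HTTP 400 by returning an error
// value instead of writing to the ResponseWriter directly.
type BadRequestError struct{ Reason string }

func (e BadRequestError) Error() string { return "Bad request: " + e.Reason }

// getPathSuffixUnescaped trims a route prefix and URL-decodes what remains,
// so percent-encoded resource names in /v1/agent paths are handled correctly.
func getPathSuffixUnescaped(path string, prefixToTrim string) (string, error) {
	suffix := strings.TrimPrefix(path, prefixToTrim)
	unescaped, err := url.PathUnescape(suffix)
	if err != nil {
		return "", BadRequestError{Reason: fmt.Sprintf("failed to decode %q: %v", suffix, err)}
	}
	return unescaped, nil
}
```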
File diff suppressed because it is too large


@ -1855,7 +1855,6 @@ func TestAgent_AddCheck_Alias(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
@ -1869,19 +1868,19 @@ func TestAgent_AddCheck_Alias(t *testing.T) {
AliasService: "foo",
}
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
require.NoError(err)
require.NoError(t, err)
// Ensure we have a check mapping
sChk := requireCheckExists(t, a, "aliashealth")
require.Equal(api.HealthCritical, sChk.Status)
require.Equal(t, api.HealthCritical, sChk.Status)
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
require.True(ok, "missing aliashealth check")
require.Equal("", chkImpl.RPCReq.Token)
require.True(t, ok, "missing aliashealth check")
require.Equal(t, "", chkImpl.RPCReq.Token)
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
require.NotNil(cs)
require.Equal("", cs.Token)
require.NotNil(t, cs)
require.Equal(t, "", cs.Token)
}
func TestAgent_AddCheck_Alias_setToken(t *testing.T) {
@ -1891,7 +1890,6 @@ func TestAgent_AddCheck_Alias_setToken(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
@ -1905,15 +1903,15 @@ func TestAgent_AddCheck_Alias_setToken(t *testing.T) {
AliasService: "foo",
}
err := a.AddCheck(health, chk, false, "foo", ConfigSourceLocal)
require.NoError(err)
require.NoError(t, err)
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
require.NotNil(cs)
require.Equal("foo", cs.Token)
require.NotNil(t, cs)
require.Equal(t, "foo", cs.Token)
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
require.True(ok, "missing aliashealth check")
require.Equal("foo", chkImpl.RPCReq.Token)
require.True(t, ok, "missing aliashealth check")
require.Equal(t, "foo", chkImpl.RPCReq.Token)
}
func TestAgent_AddCheck_Alias_userToken(t *testing.T) {
@ -1923,7 +1921,6 @@ func TestAgent_AddCheck_Alias_userToken(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, `
acl_token = "hello"
`)
@ -1939,15 +1936,15 @@ acl_token = "hello"
AliasService: "foo",
}
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
require.NoError(err)
require.NoError(t, err)
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
require.NotNil(cs)
require.Equal("", cs.Token) // State token should still be empty
require.NotNil(t, cs)
require.Equal(t, "", cs.Token) // State token should still be empty
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
require.True(ok, "missing aliashealth check")
require.Equal("hello", chkImpl.RPCReq.Token) // Check should use the token
require.True(t, ok, "missing aliashealth check")
require.Equal(t, "hello", chkImpl.RPCReq.Token) // Check should use the token
}
func TestAgent_AddCheck_Alias_userAndSetToken(t *testing.T) {
@ -1957,7 +1954,6 @@ func TestAgent_AddCheck_Alias_userAndSetToken(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, `
acl_token = "hello"
`)
@ -1973,15 +1969,15 @@ acl_token = "hello"
AliasService: "foo",
}
err := a.AddCheck(health, chk, false, "goodbye", ConfigSourceLocal)
require.NoError(err)
require.NoError(t, err)
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
require.NotNil(cs)
require.Equal("goodbye", cs.Token)
require.NotNil(t, cs)
require.Equal(t, "goodbye", cs.Token)
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
require.True(ok, "missing aliashealth check")
require.Equal("goodbye", chkImpl.RPCReq.Token)
require.True(t, ok, "missing aliashealth check")
require.Equal(t, "goodbye", chkImpl.RPCReq.Token)
}
func TestAgent_RemoveCheck(t *testing.T) {

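The test-file hunks above and throughout the rest of this commit are largely mechanical: they drop the `require := require.New(t)` assertion instance and call the package-level `require` helpers with the `*testing.T` passed explicitly. A small self-contained illustration of the two testify styles (the variable names here are placeholders, not from the commit):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestStyles(t *testing.T) {
	result, err := 42, error(nil)

	// Instance style (being removed in this commit): helpers bound to t up front.
	r := require.New(t)
	r.NoError(err)
	r.Equal(42, result)

	// Package-level style (being adopted): t is passed explicitly on every call.
	require.NoError(t, err)
	require.Equal(t, 42, result)
}
```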

@ -11,7 +11,6 @@ import (
)
func TestCatalogServices(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &CatalogServices{RPC: rpc}
@ -22,10 +21,10 @@ func TestCatalogServices(t *testing.T) {
rpc.On("RPC", "Catalog.ServiceNodes", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.ServiceSpecificRequest)
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal("web", req.ServiceName)
require.True(req.AllowStale)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal(t, "web", req.ServiceName)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.IndexedServiceNodes)
reply.ServiceNodes = []*structs.ServiceNode{
@ -44,15 +43,14 @@ func TestCatalogServices(t *testing.T) {
ServiceName: "web",
ServiceTags: []string{"tag1", "tag2"},
})
require.NoError(err)
require.Equal(cache.FetchResult{
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
}
func TestCatalogServices_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &CatalogServices{RPC: rpc}
@ -60,7 +58,7 @@ func TestCatalogServices_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(err)
require.Contains(err.Error(), "wrong type")
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}
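These cache-type tests stub the RPC client with testify's mock package; the `Run` callback inspects the request argument and fills in the reply out-parameter before the stubbed `Return(nil)` takes effect. A rough, self-contained sketch of that pattern, using stand-in types rather than the real Consul `TestRPC`/`structs` definitions:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// MockRPC is a stand-in for the generated RPC mock used in the real tests.
type MockRPC struct{ mock.Mock }

func (m *MockRPC) RPC(method string, args interface{}, reply interface{}) error {
	return m.Called(method, args, reply).Error(0)
}

type request struct{ Name string }
type response struct{ Index uint64 }

func TestRunCallback(t *testing.T) {
	rpc := &MockRPC{}
	defer rpc.AssertExpectations(t)

	// Run lets the stub mutate the reply pointer, mimicking a blocking query result.
	rpc.On("RPC", "Example.Method", mock.Anything, mock.Anything).Return(nil).
		Run(func(args mock.Arguments) {
			req := args.Get(1).(*request)
			require.Equal(t, "web", req.Name)

			reply := args.Get(2).(*response)
			reply.Index = 48
		})

	var out response
	require.NoError(t, rpc.RPC("Example.Method", &request{Name: "web"}, &out))
	require.Equal(t, uint64(48), out.Index)
}
```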


@ -123,23 +123,22 @@ func TestCalculateSoftExpire(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
require := require.New(t)
now, err := time.Parse("2006-01-02 15:04:05", tc.now)
require.NoError(err)
require.NoError(t, err)
issued, err := time.Parse("2006-01-02 15:04:05", tc.issued)
require.NoError(err)
require.NoError(t, err)
wantMin, err := time.Parse("2006-01-02 15:04:05", tc.wantMin)
require.NoError(err)
require.NoError(t, err)
wantMax, err := time.Parse("2006-01-02 15:04:05", tc.wantMax)
require.NoError(err)
require.NoError(t, err)
min, max := calculateSoftExpiry(now, &structs.IssuedCert{
ValidAfter: issued,
ValidBefore: issued.Add(tc.lifetime),
})
require.Equal(wantMin, min)
require.Equal(wantMax, max)
require.Equal(t, wantMin, min)
require.Equal(t, wantMax, max)
})
}
}
@ -156,7 +155,6 @@ func TestConnectCALeaf_changingRoots(t *testing.T) {
}
t.Parallel()
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
@ -211,8 +209,8 @@ func TestConnectCALeaf_changingRoots(t *testing.T) {
t.Fatal("shouldn't block waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(uint64(1), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -244,9 +242,9 @@ func TestConnectCALeaf_changingRoots(t *testing.T) {
t.Fatal("shouldn't block waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(t, resp, v.Value)
// 3 since the second CA "update" used up 2
require.Equal(uint64(3), v.Index)
require.Equal(t, uint64(3), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
opts.MinIndex = 3
@ -267,7 +265,6 @@ func TestConnectCALeaf_changingRoots(t *testing.T) {
func TestConnectCALeaf_changingRootsJitterBetweenCalls(t *testing.T) {
t.Parallel()
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
@ -323,8 +320,8 @@ func TestConnectCALeaf_changingRootsJitterBetweenCalls(t *testing.T) {
t.Fatal("shouldn't block waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(uint64(1), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -378,24 +375,24 @@ func TestConnectCALeaf_changingRootsJitterBetweenCalls(t *testing.T) {
if v.Index > uint64(1) {
// Got a new cert
require.Equal(resp, v.Value)
require.Equal(uint64(3), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(3), v.Index)
// Should not have been delivered before the delay
require.True(time.Since(earliestRootDelivery) > typ.TestOverrideCAChangeInitialDelay)
require.True(t, time.Since(earliestRootDelivery) > typ.TestOverrideCAChangeInitialDelay)
// All good. We are done!
rootsDelivered = true
} else {
// Should be the cached cert
require.Equal(resp, v.Value)
require.Equal(uint64(1), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(1), v.Index)
// Sanity check we blocked for the whole timeout
require.Truef(timeTaken > opts.Timeout,
require.Truef(t, timeTaken > opts.Timeout,
"should block for at least %s, returned after %s",
opts.Timeout, timeTaken)
// Sanity check that the forceExpireAfter state was set correctly
shouldExpireAfter = v.State.(*fetchState).forceExpireAfter
require.True(shouldExpireAfter.After(time.Now()))
require.True(shouldExpireAfter.Before(time.Now().Add(typ.TestOverrideCAChangeInitialDelay)))
require.True(t, shouldExpireAfter.After(time.Now()))
require.True(t, shouldExpireAfter.Before(time.Now().Add(typ.TestOverrideCAChangeInitialDelay)))
}
// Set the LastResult for subsequent fetches
opts.LastResult = &v
@ -406,8 +403,7 @@ func TestConnectCALeaf_changingRootsJitterBetweenCalls(t *testing.T) {
// Sanity check that we've not gone way beyond the deadline without a
// new cert. We give some leeway to make it less brittle.
require.Falsef(
time.Now().After(shouldExpireAfter.Add(100*time.Millisecond)),
require.Falsef(t, time.Now().After(shouldExpireAfter.Add(100*time.Millisecond)),
"waited extra 100ms and delayed CA rotate renew didn't happen")
}
}
@ -416,7 +412,6 @@ func TestConnectCALeaf_changingRootsJitterBetweenCalls(t *testing.T) {
func TestConnectCALeaf_changingRootsBetweenBlockingCalls(t *testing.T) {
t.Parallel()
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
@ -461,8 +456,8 @@ func TestConnectCALeaf_changingRootsBetweenBlockingCalls(t *testing.T) {
t.Fatal("shouldn't block waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(uint64(1), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -475,11 +470,11 @@ func TestConnectCALeaf_changingRootsBetweenBlockingCalls(t *testing.T) {
t.Fatal("shouldn't block for too long waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(t, resp, v.Value)
// Still the initial cached result
require.Equal(uint64(1), v.Index)
require.Equal(t, uint64(1), v.Index)
// Sanity check that it waited
require.True(time.Since(start) > opts.Timeout)
require.True(t, time.Since(start) > opts.Timeout)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -507,11 +502,11 @@ func TestConnectCALeaf_changingRootsBetweenBlockingCalls(t *testing.T) {
t.Fatal("shouldn't block too long waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(t, resp, v.Value)
// Index should be 3 since root change consumed 2
require.Equal(uint64(3), v.Index)
require.Equal(t, uint64(3), v.Index)
// Sanity check that we didn't wait too long
require.True(time.Since(earliestRootDelivery) < opts.Timeout)
require.True(t, time.Since(earliestRootDelivery) < opts.Timeout)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -525,7 +520,6 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
t.Parallel()
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
@ -594,8 +588,8 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
case result := <-fetchCh:
switch v := result.(type) {
case error:
require.Error(v)
require.Equal(consul.ErrRateLimited.Error(), v.Error())
require.Error(t, v)
require.Equal(t, consul.ErrRateLimited.Error(), v.Error())
case cache.FetchResult:
t.Fatalf("Expected error")
}
@ -608,8 +602,8 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
t.Fatal("shouldn't block waiting for fetch")
case result := <-fetchCh:
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(uint64(1), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
// Set MinIndex
@ -633,7 +627,7 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
earliestRootDelivery := time.Now()
// Sanity check state
require.Equal(uint64(1), atomic.LoadUint64(&rateLimitedRPCs))
require.Equal(t, uint64(1), atomic.LoadUint64(&rateLimitedRPCs))
// After root rotation jitter has been waited out, a new CSR will
// be attempted but will fail and return the previous cached result with no
@ -646,14 +640,14 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
// We should block for _at least_ one jitter period since we set that to
// 100ms and in test override mode we always pick the max jitter not a
// random amount.
require.True(time.Since(earliestRootDelivery) > 100*time.Millisecond)
require.Equal(uint64(2), atomic.LoadUint64(&rateLimitedRPCs))
require.True(t, time.Since(earliestRootDelivery) > 100*time.Millisecond)
require.Equal(t, uint64(2), atomic.LoadUint64(&rateLimitedRPCs))
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(t, resp, v.Value)
// 1 since this should still be the original cached result as we failed to
// get a new cert.
require.Equal(uint64(1), v.Index)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -667,14 +661,14 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
t.Fatal("shouldn't block too long waiting for fetch")
case result := <-fetchCh:
// We should block for _at least_ two jitter periods now.
require.True(time.Since(earliestRootDelivery) > 200*time.Millisecond)
require.Equal(uint64(3), atomic.LoadUint64(&rateLimitedRPCs))
require.True(t, time.Since(earliestRootDelivery) > 200*time.Millisecond)
require.Equal(t, uint64(3), atomic.LoadUint64(&rateLimitedRPCs))
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(t, resp, v.Value)
// 1 since this should still be the original cached result as we failed to
// get a new cert.
require.Equal(uint64(1), v.Index)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -689,13 +683,13 @@ func TestConnectCALeaf_CSRRateLimiting(t *testing.T) {
t.Fatal("shouldn't block too long waiting for fetch")
case result := <-fetchCh:
// We should block for _at least_ three jitter periods now.
require.True(time.Since(earliestRootDelivery) > 300*time.Millisecond)
require.Equal(uint64(3), atomic.LoadUint64(&rateLimitedRPCs))
require.True(t, time.Since(earliestRootDelivery) > 300*time.Millisecond)
require.Equal(t, uint64(3), atomic.LoadUint64(&rateLimitedRPCs))
v := mustFetchResult(t, result)
require.Equal(resp, v.Value)
require.Equal(t, resp, v.Value)
// 3 since the rootCA change used 2
require.Equal(uint64(3), v.Index)
require.Equal(t, uint64(3), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -909,7 +903,6 @@ func TestConnectCALeaf_expiringLeaf(t *testing.T) {
t.Parallel()
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
@ -963,10 +956,10 @@ func TestConnectCALeaf_expiringLeaf(t *testing.T) {
case result := <-fetchCh:
switch v := result.(type) {
case error:
require.NoError(v)
require.NoError(t, v)
case cache.FetchResult:
require.Equal(resp, v.Value)
require.Equal(uint64(1), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(1), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -981,10 +974,10 @@ func TestConnectCALeaf_expiringLeaf(t *testing.T) {
case result := <-fetchCh:
switch v := result.(type) {
case error:
require.NoError(v)
require.NoError(t, v)
case cache.FetchResult:
require.Equal(resp, v.Value)
require.Equal(uint64(2), v.Index)
require.Equal(t, resp, v.Value)
require.Equal(t, uint64(2), v.Index)
// Set the LastResult for subsequent fetches
opts.LastResult = &v
}
@ -1004,7 +997,6 @@ func TestConnectCALeaf_expiringLeaf(t *testing.T) {
func TestConnectCALeaf_DNSSANForService(t *testing.T) {
t.Parallel()
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
@ -1040,12 +1032,12 @@ func TestConnectCALeaf_DNSSANForService(t *testing.T) {
DNSSAN: []string{"test.example.com"},
}
_, err := typ.Fetch(opts, req)
require.NoError(err)
require.NoError(t, err)
pemBlock, _ := pem.Decode([]byte(caReq.CSR))
csr, err := x509.ParseCertificateRequest(pemBlock.Bytes)
require.NoError(err)
require.Equal(csr.DNSNames, []string{"test.example.com"})
require.NoError(t, err)
require.Equal(t, csr.DNSNames, []string{"test.example.com"})
}
// testConnectCaRoot wraps ConnectCARoot to disable refresh so that the gated


@ -11,7 +11,6 @@ import (
)
func TestConnectCARoot(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &ConnectCARoot{RPC: rpc}
@ -22,8 +21,8 @@ func TestConnectCARoot(t *testing.T) {
rpc.On("RPC", "ConnectCA.Roots", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.DCSpecificRequest)
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
reply := args.Get(2).(*structs.IndexedCARoots)
reply.QueryMeta.Index = 48
@ -35,15 +34,14 @@ func TestConnectCARoot(t *testing.T) {
MinIndex: 24,
Timeout: 1 * time.Second,
}, &structs.DCSpecificRequest{Datacenter: "dc1"})
require.Nil(err)
require.Equal(cache.FetchResult{
require.Nil(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, result)
}
func TestConnectCARoot_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &ConnectCARoot{RPC: rpc}
@ -51,7 +49,7 @@ func TestConnectCARoot_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.NotNil(err)
require.Contains(err.Error(), "wrong type")
require.NotNil(t, err)
require.Contains(t, err.Error(), "wrong type")
}


@ -11,7 +11,6 @@ import (
)
func TestHealthServices(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &HealthServices{RPC: rpc}
@ -22,10 +21,10 @@ func TestHealthServices(t *testing.T) {
rpc.On("RPC", "Health.ServiceNodes", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.ServiceSpecificRequest)
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal("web", req.ServiceName)
require.True(req.AllowStale)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal(t, "web", req.ServiceName)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.IndexedCheckServiceNodes)
reply.Nodes = []structs.CheckServiceNode{
@ -44,15 +43,14 @@ func TestHealthServices(t *testing.T) {
ServiceName: "web",
ServiceTags: []string{"tag1", "tag2"},
})
require.NoError(err)
require.Equal(cache.FetchResult{
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
}
func TestHealthServices_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &HealthServices{RPC: rpc}
@ -60,7 +58,7 @@ func TestHealthServices_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(err)
require.Contains(err.Error(), "wrong type")
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}


@ -11,7 +11,6 @@ import (
)
func TestIntentionMatch(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &IntentionMatch{RPC: rpc}
@ -22,8 +21,8 @@ func TestIntentionMatch(t *testing.T) {
rpc.On("RPC", "Intention.Match", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.IntentionQueryRequest)
require.Equal(uint64(24), req.MinQueryIndex)
require.Equal(1*time.Second, req.MaxQueryTime)
require.Equal(t, uint64(24), req.MinQueryIndex)
require.Equal(t, 1*time.Second, req.MaxQueryTime)
reply := args.Get(2).(*structs.IndexedIntentionMatches)
reply.Index = 48
@ -35,15 +34,14 @@ func TestIntentionMatch(t *testing.T) {
MinIndex: 24,
Timeout: 1 * time.Second,
}, &structs.IntentionQueryRequest{Datacenter: "dc1"})
require.NoError(err)
require.Equal(cache.FetchResult{
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, result)
}
func TestIntentionMatch_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &IntentionMatch{RPC: rpc}
@ -51,7 +49,7 @@ func TestIntentionMatch_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(err)
require.Contains(err.Error(), "wrong type")
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}


@ -11,7 +11,6 @@ import (
)
func TestNodeServices(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &NodeServices{RPC: rpc}
@ -22,10 +21,10 @@ func TestNodeServices(t *testing.T) {
rpc.On("RPC", "Catalog.NodeServices", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.NodeSpecificRequest)
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal("node-01", req.Node)
require.True(req.AllowStale)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal(t, "node-01", req.Node)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.IndexedNodeServices)
reply.NodeServices = &structs.NodeServices{
@ -49,15 +48,14 @@ func TestNodeServices(t *testing.T) {
Datacenter: "dc1",
Node: "node-01",
})
require.NoError(err)
require.Equal(cache.FetchResult{
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
}
func TestNodeServices_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &NodeServices{RPC: rpc}
@ -65,7 +63,7 @@ func TestNodeServices_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(err)
require.Contains(err.Error(), "wrong type")
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}


@ -10,7 +10,6 @@ import (
)
func TestPreparedQuery(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &PreparedQuery{RPC: rpc}
@ -21,9 +20,9 @@ func TestPreparedQuery(t *testing.T) {
rpc.On("RPC", "PreparedQuery.Execute", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.PreparedQueryExecuteRequest)
require.Equal("geo-db", req.QueryIDOrName)
require.Equal(10, req.Limit)
require.True(req.AllowStale)
require.Equal(t, "geo-db", req.QueryIDOrName)
require.Equal(t, 10, req.Limit)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.PreparedQueryExecuteResponse)
reply.QueryMeta.Index = 48
@ -36,15 +35,14 @@ func TestPreparedQuery(t *testing.T) {
QueryIDOrName: "geo-db",
Limit: 10,
})
require.NoError(err)
require.Equal(cache.FetchResult{
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, result)
}
func TestPreparedQuery_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &PreparedQuery{RPC: rpc}
@ -52,6 +50,6 @@ func TestPreparedQuery_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(err)
require.Contains(err.Error(), "wrong type")
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}


@ -11,7 +11,6 @@ import (
)
func TestResolvedServiceConfig(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &ResolvedServiceConfig{RPC: rpc}
@ -22,10 +21,10 @@ func TestResolvedServiceConfig(t *testing.T) {
rpc.On("RPC", "ConfigEntry.ResolveServiceConfig", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.ServiceConfigRequest)
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal("foo", req.Name)
require.True(req.AllowStale)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal(t, "foo", req.Name)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.ServiceConfigResponse)
reply.ProxyConfig = map[string]interface{}{
@ -49,15 +48,14 @@ func TestResolvedServiceConfig(t *testing.T) {
Datacenter: "dc1",
Name: "foo",
})
require.NoError(err)
require.Equal(cache.FetchResult{
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
}
func TestResolvedServiceConfig_badReqType(t *testing.T) {
require := require.New(t)
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &ResolvedServiceConfig{RPC: rpc}
@ -65,7 +63,7 @@ func TestResolvedServiceConfig_badReqType(t *testing.T) {
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(err)
require.Contains(err.Error(), "wrong type")
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}


@ -24,8 +24,6 @@ import (
func TestCacheGet_noIndex(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
c := New(Options{})
@ -37,15 +35,15 @@ func TestCacheGet_noIndex(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Get, should not fetch since we already have a satisfying value
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.True(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call
@ -57,8 +55,6 @@ func TestCacheGet_noIndex(t *testing.T) {
func TestCacheGet_initError(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
c := New(Options{})
@ -71,15 +67,15 @@ func TestCacheGet_initError(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.Error(err)
require.Nil(result)
require.False(meta.Hit)
require.Error(t, err)
require.Nil(t, result)
require.False(t, meta.Hit)
// Get, should fetch again since our last fetch was an error
result, meta, err = c.Get(context.Background(), "t", req)
require.Error(err)
require.Nil(result)
require.False(meta.Hit)
require.Error(t, err)
require.Nil(t, result)
require.False(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call
@ -96,8 +92,6 @@ func TestCacheGet_cachedErrorsDontStick(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
c := New(Options{})
@ -115,15 +109,15 @@ func TestCacheGet_cachedErrorsDontStick(t *testing.T) {
// Get, should fetch and get error
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.Error(err)
require.Nil(result)
require.False(meta.Hit)
require.Error(t, err)
require.Nil(t, result)
require.False(t, meta.Hit)
// Get, should fetch again since our last fetch was an error, but get success
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Now get should block until timeout and then get the same response NOT the
// cached error.
@ -157,8 +151,6 @@ func TestCacheGet_cachedErrorsDontStick(t *testing.T) {
func TestCacheGet_blankCacheKey(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
c := New(Options{})
@ -170,15 +162,15 @@ func TestCacheGet_blankCacheKey(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: ""})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Get, should not fetch since we already have a satisfying value
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call
@ -225,8 +217,6 @@ func TestCacheGet_blockingInitSameKey(t *testing.T) {
func TestCacheGet_blockingInitDiffKeys(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
c := New(Options{})
@ -269,7 +259,7 @@ func TestCacheGet_blockingInitDiffKeys(t *testing.T) {
// Verify proper keys
sort.Strings(keys)
require.Equal([]string{"goodbye", "hello"}, keys)
require.Equal(t, []string{"goodbye", "hello"}, keys)
}
// Test a get with an index set will wait until an index that is higher
@ -414,8 +404,6 @@ func TestCacheGet_emptyFetchResult(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
c := New(Options{})
@ -429,29 +417,29 @@ func TestCacheGet_emptyFetchResult(t *testing.T) {
typ.Static(FetchResult{Value: nil, State: 32}, nil).Run(func(args mock.Arguments) {
// We should get back the original state
opts := args.Get(0).(FetchOptions)
require.NotNil(opts.LastResult)
require.NotNil(t, opts.LastResult)
stateCh <- opts.LastResult.State.(int)
})
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Get, should not fetch since we already have a satisfying value
req = TestRequest(t, RequestInfo{
Key: "hello", MinIndex: 1, Timeout: 100 * time.Millisecond})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// State delivered to second call should be the result from first call.
select {
case state := <-stateCh:
require.Equal(31, state)
require.Equal(t, 31, state)
case <-time.After(20 * time.Millisecond):
t.Fatal("timed out")
}
@ -461,12 +449,12 @@ func TestCacheGet_emptyFetchResult(t *testing.T) {
req = TestRequest(t, RequestInfo{
Key: "hello", MinIndex: 1, Timeout: 100 * time.Millisecond})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
select {
case state := <-stateCh:
require.Equal(32, state)
require.Equal(t, 32, state)
case <-time.After(20 * time.Millisecond):
t.Fatal("timed out")
}
@ -737,8 +725,6 @@ func TestCacheGet_noIndexSetsOne(t *testing.T) {
func TestCacheGet_fetchTimeout(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
timeout := 10 * time.Minute
typ.On("RegisterOptions").Return(RegisterOptions{
@ -761,12 +747,12 @@ func TestCacheGet_fetchTimeout(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Test the timeout
require.Equal(timeout, actual)
require.Equal(t, timeout, actual)
}
// Test that entries expire
@ -777,8 +763,6 @@ func TestCacheGet_expire(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
LastGetTTL: 400 * time.Millisecond,
@ -795,9 +779,9 @@ func TestCacheGet_expire(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Wait for a non-trivial amount of time to sanity check the age increases at
// least this amount. Note that this is not a fudge for some timing-dependent
@ -808,10 +792,10 @@ func TestCacheGet_expire(t *testing.T) {
// Get, should not fetch, verified via the mock assertions above
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.True(meta.Hit)
require.True(meta.Age > 5*time.Millisecond)
require.NoError(t, err)
require.Equal(t, 42, result)
require.True(t, meta.Hit)
require.True(t, meta.Age > 5*time.Millisecond)
// Sleep for the expiry
time.Sleep(500 * time.Millisecond)
@ -819,9 +803,9 @@ func TestCacheGet_expire(t *testing.T) {
// Get, should fetch
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen then verify
// that we still only got the one call
@ -837,8 +821,6 @@ func TestCacheGet_expire(t *testing.T) {
func TestCacheGet_expireBackgroudRefreshCancel(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
LastGetTTL: 400 * time.Millisecond,
@ -879,18 +861,18 @@ func TestCacheGet_expireBackgroudRefreshCancel(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(8, result)
require.Equal(uint64(4), meta.Index)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.Equal(t, uint64(4), meta.Index)
require.False(t, meta.Hit)
// Get, should not fetch, verified via the mock assertions above
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(8, result)
require.Equal(uint64(4), meta.Index)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.Equal(t, uint64(4), meta.Index)
require.True(t, meta.Hit)
// Sleep for the expiry
time.Sleep(500 * time.Millisecond)
@ -898,10 +880,10 @@ func TestCacheGet_expireBackgroudRefreshCancel(t *testing.T) {
// Get, should fetch
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(8, result)
require.Equal(uint64(4), meta.Index)
require.False(meta.Hit, "the fetch should not have re-populated the cache "+
require.NoError(t, err)
require.Equal(t, 8, result)
require.Equal(t, uint64(4), meta.Index)
require.False(t, meta.Hit, "the fetch should not have re-populated the cache "+
"entry after it expired so this get should be a miss")
// Sleep a tiny bit just to let maybe some background calls happen
@ -915,8 +897,6 @@ func TestCacheGet_expireBackgroudRefreshCancel(t *testing.T) {
func TestCacheGet_expireBackgroudRefresh(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
LastGetTTL: 400 * time.Millisecond,
@ -948,18 +928,18 @@ func TestCacheGet_expireBackgroudRefresh(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(8, result)
require.Equal(uint64(4), meta.Index)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.Equal(t, uint64(4), meta.Index)
require.False(t, meta.Hit)
// Get, should not fetch, verified via the mock assertions above
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(8, result)
require.Equal(uint64(4), meta.Index)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.Equal(t, uint64(4), meta.Index)
require.True(t, meta.Hit)
// Sleep for the expiry
time.Sleep(500 * time.Millisecond)
@ -971,10 +951,10 @@ func TestCacheGet_expireBackgroudRefresh(t *testing.T) {
// re-insert the value back into the cache and make it live forever).
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(8, result)
require.Equal(uint64(4), meta.Index)
require.False(meta.Hit, "the fetch should not have re-populated the cache "+
require.NoError(t, err)
require.Equal(t, 8, result)
require.Equal(t, uint64(4), meta.Index)
require.False(t, meta.Hit, "the fetch should not have re-populated the cache "+
"entry after it expired so this get should be a miss")
// Sleep a tiny bit just to let maybe some background calls happen
@ -991,8 +971,6 @@ func TestCacheGet_expireResetGet(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
LastGetTTL: 150 * time.Millisecond,
@ -1009,9 +987,9 @@ func TestCacheGet_expireResetGet(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Fetch multiple times, where the total time is well beyond
// the TTL. We should not trigger any fetches during this time.
@ -1022,9 +1000,9 @@ func TestCacheGet_expireResetGet(t *testing.T) {
// Get, should not fetch
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.True(t, meta.Hit)
}
time.Sleep(200 * time.Millisecond)
@ -1032,9 +1010,9 @@ func TestCacheGet_expireResetGet(t *testing.T) {
// Get, should fetch
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call
@ -1046,8 +1024,6 @@ func TestCacheGet_expireResetGet(t *testing.T) {
func TestCacheGet_expireResetGetNoChange(t *testing.T) {
t.Parallel()
require := require.New(t)
// Create a closer so we can tell if the entry gets evicted.
closer := &testCloser{}
@ -1080,19 +1056,19 @@ func TestCacheGet_expireResetGetNoChange(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.Equal(uint64(10), meta.Index)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.Equal(t, uint64(10), meta.Index)
require.False(t, meta.Hit)
// Do a blocking watch of the value that won't time out until after the TTL.
start := time.Now()
req = TestRequest(t, RequestInfo{Key: "hello", MinIndex: 10, Timeout: 300 * time.Millisecond})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.Equal(uint64(10), meta.Index)
require.GreaterOrEqual(time.Since(start).Milliseconds(), int64(300))
require.NoError(t, err)
require.Equal(t, 42, result)
require.Equal(t, uint64(10), meta.Index)
require.GreaterOrEqual(t, time.Since(start).Milliseconds(), int64(300))
// This is the point of this test! Even though we waited for a change for
// longer than the TTL, we should have been updating the TTL so that the cache
@ -1100,7 +1076,7 @@ func TestCacheGet_expireResetGetNoChange(t *testing.T) {
// since that is not set for blocking Get calls but we can assert that the
// entry was never closed (which assuming the test for eviction closing is
// also passing is a reliable signal).
require.False(closer.isClosed(), "cache entry should not have been evicted")
require.False(t, closer.isClosed(), "cache entry should not have been evicted")
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call
@ -1116,8 +1092,6 @@ func TestCacheGet_expireClose(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
defer typ.AssertExpectations(t)
c := New(Options{})
@ -1137,16 +1111,16 @@ func TestCacheGet_expireClose(t *testing.T) {
ctx := context.Background()
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(ctx, "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.False(state.isClosed())
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
require.False(t, state.isClosed())
// Sleep for the expiry
time.Sleep(200 * time.Millisecond)
// state.Close() should have been called
require.True(state.isClosed())
require.True(t, state.isClosed())
}
type testCloser struct {
@ -1171,8 +1145,6 @@ func (t *testCloser) isClosed() bool {
func TestCacheGet_duplicateKeyDifferentType(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := TestType(t)
defer typ.AssertExpectations(t)
typ2 := TestType(t)
@ -1189,23 +1161,23 @@ func TestCacheGet_duplicateKeyDifferentType(t *testing.T) {
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "foo"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(100, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 100, result)
require.False(t, meta.Hit)
// Get from t2 with same key, should fetch
req = TestRequest(t, RequestInfo{Key: "foo"})
result, meta, err = c.Get(context.Background(), "t2", req)
require.NoError(err)
require.Equal(200, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 200, result)
require.False(t, meta.Hit)
// Get from t again with same key, should cache
req = TestRequest(t, RequestInfo{Key: "foo"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(100, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 100, result)
require.True(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call
@ -1283,8 +1255,6 @@ func TestCacheGet_refreshAge(t *testing.T) {
}
t.Parallel()
require := require.New(t)
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
Refresh: true,
@ -1330,11 +1300,11 @@ func TestCacheGet_refreshAge(t *testing.T) {
// Fetch again, non-blocking
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
require.NoError(err)
require.Equal(8, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.True(t, meta.Hit)
// Age should be zero since background refresh was "active"
require.Equal(time.Duration(0), meta.Age)
require.Equal(t, time.Duration(0), meta.Age)
}
// Now fail the next background sync
@ -1350,21 +1320,21 @@ func TestCacheGet_refreshAge(t *testing.T) {
var lastAge time.Duration
{
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
require.NoError(err)
require.Equal(8, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.True(t, meta.Hit)
// Age should be non-zero since background refresh was "active"
require.True(meta.Age > 0)
require.True(t, meta.Age > 0)
lastAge = meta.Age
}
// Wait a bit longer - age should increase by at least this much
time.Sleep(5 * time.Millisecond)
{
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
require.NoError(err)
require.Equal(8, result)
require.True(meta.Hit)
require.True(meta.Age > (lastAge + (1 * time.Millisecond)))
require.NoError(t, err)
require.Equal(t, 8, result)
require.True(t, meta.Hit)
require.True(t, meta.Age > (lastAge+(1*time.Millisecond)))
}
// Now unfail the background refresh
@ -1384,18 +1354,18 @@ func TestCacheGet_refreshAge(t *testing.T) {
time.Sleep(100 * time.Millisecond)
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
// Should never error even if background is failing as we have cached value
require.NoError(err)
require.True(meta.Hit)
require.NoError(t, err)
require.True(t, meta.Hit)
// Got the new value!
if result == 10 {
// Age should be zero since background refresh is "active" again
t.Logf("Succeeded after %d attempts", attempts)
require.Equal(time.Duration(0), meta.Age)
require.Equal(t, time.Duration(0), meta.Age)
timeout = false
break
}
}
require.False(timeout, "failed to observe update after %s", time.Since(t0))
require.False(t, timeout, "failed to observe update after %s", time.Since(t0))
}
func TestCacheGet_nonRefreshAge(t *testing.T) {
@ -1405,8 +1375,6 @@ func TestCacheGet_nonRefreshAge(t *testing.T) {
t.Parallel()
require := require.New(t)
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
Refresh: false,
@ -1440,10 +1408,10 @@ func TestCacheGet_nonRefreshAge(t *testing.T) {
// Fetch again, non-blocking
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
require.NoError(err)
require.Equal(8, result)
require.True(meta.Hit)
require.True(meta.Age > (5 * time.Millisecond))
require.NoError(t, err)
require.Equal(t, 8, result)
require.True(t, meta.Hit)
require.True(t, meta.Age > (5*time.Millisecond))
lastAge = meta.Age
}
@ -1452,11 +1420,11 @@ func TestCacheGet_nonRefreshAge(t *testing.T) {
{
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
require.NoError(err)
require.Equal(8, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.False(t, meta.Hit)
// Age should smaller again
require.True(meta.Age < lastAge)
require.True(t, meta.Age < lastAge)
}
{
@ -1468,10 +1436,10 @@ func TestCacheGet_nonRefreshAge(t *testing.T) {
// Fetch again, non-blocking
result, meta, err := c.Get(context.Background(), "t", TestRequest(t, RequestInfo{Key: "hello"}))
require.NoError(err)
require.Equal(8, result)
require.True(meta.Hit)
require.True(meta.Age > (5 * time.Millisecond))
require.NoError(t, err)
require.Equal(t, 8, result)
require.True(t, meta.Hit)
require.True(t, meta.Age > (5*time.Millisecond))
lastAge = meta.Age
}
@ -1481,11 +1449,11 @@ func TestCacheGet_nonRefreshAge(t *testing.T) {
Key: "hello",
MaxAge: 1 * time.Millisecond,
}))
require.NoError(err)
require.Equal(8, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 8, result)
require.False(t, meta.Hit)
// Age should smaller again
require.True(meta.Age < lastAge)
require.True(t, meta.Age < lastAge)
}
}
@ -1505,21 +1473,19 @@ func TestCacheGet_nonBlockingType(t *testing.T) {
require.Equal(t, uint64(0), opts.MinIndex)
})
require := require.New(t)
// Get, should fetch
req := TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err := c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.False(t, meta.Hit)
// Get, should not fetch since we have a cached value
req = TestRequest(t, RequestInfo{Key: "hello"})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.True(t, meta.Hit)
// Get, should not attempt to fetch with blocking even if requested. The
// assertions below about the value being the same combined with the fact the
@ -1531,25 +1497,25 @@ func TestCacheGet_nonBlockingType(t *testing.T) {
Timeout: 10 * time.Minute,
})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(42, result)
require.True(meta.Hit)
require.NoError(t, err)
require.Equal(t, 42, result)
require.True(t, meta.Hit)
time.Sleep(10 * time.Millisecond)
// Get with a max age should fetch again
req = TestRequest(t, RequestInfo{Key: "hello", MaxAge: 5 * time.Millisecond})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(43, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 43, result)
require.False(t, meta.Hit)
// Get with a must revalidate should fetch again even without a delay.
req = TestRequest(t, RequestInfo{Key: "hello", MustRevalidate: true})
result, meta, err = c.Get(context.Background(), "t", req)
require.NoError(err)
require.Equal(43, result)
require.False(meta.Hit)
require.NoError(t, err)
require.Equal(t, 43, result)
require.False(t, meta.Hit)
// Sleep a tiny bit just to let maybe some background calls happen
// then verify that we still only got the one call


@ -51,15 +51,13 @@ func TestCacheNotify(t *testing.T) {
// after cancellation as if it had timed out.
typ.Static(FetchResult{Value: 42, Index: 8}, nil).WaitUntil(trigger[4])
require := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ch := make(chan UpdateEvent)
err := c.Notify(ctx, "t", TestRequest(t, RequestInfo{Key: "hello"}), "test", ch)
require.NoError(err)
require.NoError(t, err)
// Should receive the error with index == 0 first.
TestCacheNotifyChResult(t, ch, UpdateEvent{
@ -70,7 +68,7 @@ func TestCacheNotify(t *testing.T) {
})
// There should be no more updates delivered yet
require.Len(ch, 0)
require.Len(t, ch, 0)
// Trigger blocking query to return a "change"
close(trigger[0])
@ -102,7 +100,7 @@ func TestCacheNotify(t *testing.T) {
// requests to the "backend"
// - that multiple watchers can distinguish their results using correlationID
err = c.Notify(ctx, "t", TestRequest(t, RequestInfo{Key: "hello"}), "test2", ch)
require.NoError(err)
require.NoError(t, err)
// Should get test2 notify immediately, and it should be a cache hit
TestCacheNotifyChResult(t, ch, UpdateEvent{
@ -121,7 +119,7 @@ func TestCacheNotify(t *testing.T) {
// it's only a sanity check, if we somehow _do_ get the change delivered later
// than 10ms the next value assertion will fail anyway.
time.Sleep(10 * time.Millisecond)
require.Len(ch, 0)
require.Len(t, ch, 0)
// Trigger final update
close(trigger[3])
@ -183,15 +181,13 @@ func TestCacheNotifyPolling(t *testing.T) {
typ.Static(FetchResult{Value: 12, Index: 1}, nil).Once()
typ.Static(FetchResult{Value: 42, Index: 1}, nil).Once()
require := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ch := make(chan UpdateEvent)
err := c.Notify(ctx, "t", TestRequest(t, RequestInfo{Key: "hello", MaxAge: 100 * time.Millisecond}), "test", ch)
require.NoError(err)
require.NoError(t, err)
// Should receive the first result pretty soon
TestCacheNotifyChResult(t, ch, UpdateEvent{
@ -202,32 +198,32 @@ func TestCacheNotifyPolling(t *testing.T) {
})
// There should be no more updates delivered yet
require.Len(ch, 0)
require.Len(t, ch, 0)
// make sure the updates do not come too quickly
select {
case <-time.After(50 * time.Millisecond):
case <-ch:
require.Fail("Received update too early")
require.Fail(t, "Received update too early")
}
// make sure we get the update not too far out.
select {
case <-time.After(100 * time.Millisecond):
require.Fail("Didn't receive the notification")
require.Fail(t, "Didn't receive the notification")
case result := <-ch:
require.Equal(result.Result, 12)
require.Equal(result.CorrelationID, "test")
require.Equal(result.Meta.Hit, false)
require.Equal(result.Meta.Index, uint64(1))
require.Equal(t, result.Result, 12)
require.Equal(t, result.CorrelationID, "test")
require.Equal(t, result.Meta.Hit, false)
require.Equal(t, result.Meta.Index, uint64(1))
// pretty conservative check it should be even newer because without a second
// notifier each value returned will have been executed just then and not served
// from the cache.
require.True(result.Meta.Age < 50*time.Millisecond)
require.NoError(result.Err)
require.True(t, result.Meta.Age < 50*time.Millisecond)
require.NoError(t, result.Err)
}
require.Len(ch, 0)
require.Len(t, ch, 0)
// Register a second observer using same chan and request. Note that this is
// testing a few things implicitly:
@ -235,7 +231,7 @@ func TestCacheNotifyPolling(t *testing.T) {
// requests to the "backend"
// - that multiple watchers can distinguish their results using correlationID
err = c.Notify(ctx, "t", TestRequest(t, RequestInfo{Key: "hello", MaxAge: 100 * time.Millisecond}), "test2", ch)
require.NoError(err)
require.NoError(t, err)
// Should get test2 notify immediately, and it should be a cache hit
TestCacheNotifyChResult(t, ch, UpdateEvent{
@ -245,7 +241,7 @@ func TestCacheNotifyPolling(t *testing.T) {
Err: nil,
})
require.Len(ch, 0)
require.Len(t, ch, 0)
// wait for the next batch of responses
events := make([]UpdateEvent, 0)
@ -255,25 +251,25 @@ func TestCacheNotifyPolling(t *testing.T) {
for i := 0; i < 2; i++ {
select {
case <-timeout:
require.Fail("UpdateEvent not received in time")
require.Fail(t, "UpdateEvent not received in time")
case eve := <-ch:
events = append(events, eve)
}
}
require.Equal(events[0].Result, 42)
require.Equal(events[0].Meta.Hit && events[1].Meta.Hit, false)
require.Equal(events[0].Meta.Index, uint64(1))
require.True(events[0].Meta.Age < 50*time.Millisecond)
require.NoError(events[0].Err)
require.Equal(events[1].Result, 42)
require.Equal(t, events[0].Result, 42)
require.Equal(t, events[0].Meta.Hit && events[1].Meta.Hit, false)
require.Equal(t, events[0].Meta.Index, uint64(1))
require.True(t, events[0].Meta.Age < 50*time.Millisecond)
require.NoError(t, events[0].Err)
require.Equal(t, events[1].Result, 42)
// Sometimes this would be a hit and others not. It all depends on when the various getWithIndex calls got fired.
// If both are done concurrently then it will not be a cache hit but the request gets single flighted and both
// get notified at the same time.
// require.Equal(events[1].Meta.Hit, true)
require.Equal(events[1].Meta.Index, uint64(1))
require.True(events[1].Meta.Age < 100*time.Millisecond)
require.NoError(events[1].Err)
// require.Equal(t,events[1].Meta.Hit, true)
require.Equal(t, events[1].Meta.Index, uint64(1))
require.True(t, events[1].Meta.Age < 100*time.Millisecond)
require.NoError(t, events[1].Err)
}
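The polling test above registers two watches ("test" and "test2") on a single channel and tells their results apart by CorrelationID. A minimal consumer sketch of that pattern; the UpdateEvent struct here only mirrors the fields the assertions use and is an illustrative stand-in, not the cache package's actual type:

```go
package example

import "context"

// UpdateEvent mirrors only the fields the test asserts on; it is an
// illustrative stand-in, not the real cache UpdateEvent type.
type UpdateEvent struct {
	CorrelationID string
	Result        interface{}
	Err           error
}

// consume drains a shared notification channel and routes each event by its
// correlation ID, the same way the test distinguishes "test" from "test2"
// results delivered on one channel.
func consume(ctx context.Context, ch <-chan UpdateEvent, handle map[string]func(UpdateEvent)) {
	for {
		select {
		case <-ctx.Done():
			return
		case ev := <-ch:
			if h, ok := handle[ev.CorrelationID]; ok {
				h(ev)
			}
		}
	}
}
```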
// Test that a refresh performs a backoff.
@ -298,15 +294,13 @@ func TestCacheWatch_ErrorBackoff(t *testing.T) {
atomic.AddUint32(&retries, 1)
})
require := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ch := make(chan UpdateEvent)
err := c.Notify(ctx, "t", TestRequest(t, RequestInfo{Key: "hello"}), "test", ch)
require.NoError(err)
require.NoError(t, err)
// Should receive the first result pretty soon
TestCacheNotifyChResult(t, ch, UpdateEvent{
@ -331,15 +325,15 @@ OUT:
break OUT
case u := <-ch:
numErrors++
require.Error(u.Err)
require.Error(t, u.Err)
}
}
// Must be fewer than 10 failures in that time
require.True(numErrors < 10, fmt.Sprintf("numErrors: %d", numErrors))
require.True(t, numErrors < 10, fmt.Sprintf("numErrors: %d", numErrors))
// Check the number of RPCs as a sanity check too
actual := atomic.LoadUint32(&retries)
require.True(actual < 10, fmt.Sprintf("actual: %d", actual))
require.True(t, actual < 10, fmt.Sprintf("actual: %d", actual))
}
// Test that a refresh performs a backoff.
@ -363,15 +357,13 @@ func TestCacheWatch_ErrorBackoffNonBlocking(t *testing.T) {
atomic.AddUint32(&retries, 1)
})
require := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ch := make(chan UpdateEvent)
err := c.Notify(ctx, "t", TestRequest(t, RequestInfo{Key: "hello", MaxAge: 100 * time.Millisecond}), "test", ch)
require.NoError(err)
require.NoError(t, err)
// Should receive the first result pretty soon
TestCacheNotifyChResult(t, ch, UpdateEvent{
@ -399,13 +391,13 @@ OUT:
break OUT
case u := <-ch:
numErrors++
require.Error(u.Err)
require.Error(t, u.Err)
}
}
// Must be fewer than 10 failures in that time
require.True(numErrors < 10, fmt.Sprintf("numErrors: %d", numErrors))
require.True(t, numErrors < 10, fmt.Sprintf("numErrors: %d", numErrors))
// Check the number of RPCs as a sanity check too
actual := atomic.LoadUint32(&retries)
require.True(actual < 10, fmt.Sprintf("actual: %d", actual))
require.True(t, actual < 10, fmt.Sprintf("actual: %d", actual))
}
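Most of the churn in these test hunks is mechanical: the per-test `require := require.New(t)` / `assert := assert.New(t)` wrappers are removed and every assertion switches to the package-level testify helpers that take `*testing.T` explicitly. A minimal before/after sketch with a hypothetical test (the names and values are illustrative, not from this commit):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Old style: bind a require object to t once, then call methods on it.
func TestSum_oldStyle(t *testing.T) {
	r := require.New(t)
	r.Equal(4, 2+2)
	r.NoError(nil)
}

// New style used throughout this commit: package-level helpers that take
// t as the first argument, so nothing needs to be bound per test.
func TestSum_newStyle(t *testing.T) {
	require.Equal(t, 4, 2+2)
	require.NoError(t, nil)
}
```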

View File

@ -3,7 +3,6 @@ package agent
import (
"fmt"
"net/http"
"strings"
metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
@ -362,7 +361,11 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R
}
// Pull out the service name
args.ServiceName = strings.TrimPrefix(req.URL.Path, pathPrefix)
var err error
args.ServiceName, err = getPathSuffixUnescaped(req.URL.Path, pathPrefix)
if err != nil {
return nil, err
}
if args.ServiceName == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing service name")
@ -435,7 +438,11 @@ func (s *HTTPHandlers) CatalogNodeServices(resp http.ResponseWriter, req *http.R
}
// Pull out the node name
args.Node = strings.TrimPrefix(req.URL.Path, "/v1/catalog/node/")
var err error
args.Node, err = getPathSuffixUnescaped(req.URL.Path, "/v1/catalog/node/")
if err != nil {
return nil, err
}
if args.Node == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing node name")
@ -498,7 +505,11 @@ func (s *HTTPHandlers) CatalogNodeServiceList(resp http.ResponseWriter, req *htt
}
// Pull out the node name
args.Node = strings.TrimPrefix(req.URL.Path, "/v1/catalog/node-services/")
var err error
args.Node, err = getPathSuffixUnescaped(req.URL.Path, "/v1/catalog/node-services/")
if err != nil {
return nil, err
}
if args.Node == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing node name")
@ -547,7 +558,11 @@ func (s *HTTPHandlers) CatalogGatewayServices(resp http.ResponseWriter, req *htt
}
// Pull out the gateway's service name
args.ServiceName = strings.TrimPrefix(req.URL.Path, "/v1/catalog/gateway-services/")
var err error
args.ServiceName, err = getPathSuffixUnescaped(req.URL.Path, "/v1/catalog/gateway-services/")
if err != nil {
return nil, err
}
if args.ServiceName == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing gateway name")
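Each of these handler hunks swaps a bare `strings.TrimPrefix` for a `getPathSuffixUnescaped` call and propagates its error. The helper itself is not part of this hunk; the sketch below is an assumption about its shape, inferred from the call sites (trim the route prefix, then percent-decode whatever remains):

```go
package agent

import (
	"fmt"
	"net/url"
	"strings"
)

// getPathSuffixUnescaped is not shown in this diff; this sketch is consistent
// with how the handlers call it: strip the route prefix, then percent-decode
// the remaining path segment so encoded resource names round-trip intact.
func getPathSuffixUnescaped(path string, prefixToTrim string) (string, error) {
	suffix := strings.TrimPrefix(path, prefixToTrim)
	unescaped, err := url.PathUnescape(suffix)
	if err != nil {
		return "", fmt.Errorf("failed to unescape path %q: %w", suffix, err)
	}
	return unescaped, nil
}
```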

View File

@ -635,9 +635,6 @@ func TestCatalogServiceNodes(t *testing.T) {
a := NewTestAgent(t, "")
defer a.Shutdown()
assert := assert.New(t)
require := require.New(t)
// Make sure an empty list is returned, not a nil
{
req, _ := http.NewRequest("GET", "/v1/catalog/service/api?tag=a", nil)
@ -691,12 +688,12 @@ func TestCatalogServiceNodes(t *testing.T) {
req, _ := http.NewRequest("GET", "/v1/catalog/service/api?cached", nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogServiceNodes(resp, req)
require.NoError(err)
require.NoError(t, err)
nodes := obj.(structs.ServiceNodes)
assert.Len(nodes, 1)
assert.Len(t, nodes, 1)
// Should be a cache miss
assert.Equal("MISS", resp.Header().Get("X-Cache"))
assert.Equal(t, "MISS", resp.Header().Get("X-Cache"))
}
{
@ -704,13 +701,13 @@ func TestCatalogServiceNodes(t *testing.T) {
req, _ := http.NewRequest("GET", "/v1/catalog/service/api?cached", nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogServiceNodes(resp, req)
require.NoError(err)
require.NoError(t, err)
nodes := obj.(structs.ServiceNodes)
assert.Len(nodes, 1)
assert.Len(t, nodes, 1)
// Should be a cache HIT now!
assert.Equal("HIT", resp.Header().Get("X-Cache"))
assert.Equal("0", resp.Header().Get("Age"))
assert.Equal(t, "HIT", resp.Header().Get("X-Cache"))
assert.Equal(t, "0", resp.Header().Get("Age"))
}
// Ensure background refresh works
@ -719,7 +716,7 @@ func TestCatalogServiceNodes(t *testing.T) {
args2 := args
args2.Node = "bar"
args2.Address = "127.0.0.2"
require.NoError(a.RPC("Catalog.Register", args, &out))
require.NoError(t, a.RPC("Catalog.Register", args, &out))
retry.Run(t, func(r *retry.R) {
// List it again
@ -1057,7 +1054,6 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) {
t.Parallel()
assert := assert.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
@ -1065,19 +1061,19 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) {
// Register
args := structs.TestRegisterRequestProxy(t)
var out struct{}
assert.Nil(a.RPC("Catalog.Register", args, &out))
assert.Nil(t, a.RPC("Catalog.Register", args, &out))
req, _ := http.NewRequest("GET", fmt.Sprintf(
"/v1/catalog/service/%s", args.Service.Service), nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogServiceNodes(resp, req)
assert.Nil(err)
assert.Nil(t, err)
assertIndex(t, resp)
nodes := obj.(structs.ServiceNodes)
assert.Len(nodes, 1)
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(args.Service.Proxy, nodes[0].ServiceProxy)
assert.Len(t, nodes, 1)
assert.Equal(t, structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(t, args.Service.Proxy, nodes[0].ServiceProxy)
}
// Test that the Connect-compatible endpoints can be queried for a
@ -1089,7 +1085,6 @@ func TestCatalogConnectServiceNodes_good(t *testing.T) {
t.Parallel()
assert := assert.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
@ -1098,20 +1093,20 @@ func TestCatalogConnectServiceNodes_good(t *testing.T) {
args := structs.TestRegisterRequestProxy(t)
args.Service.Address = "127.0.0.55"
var out struct{}
assert.Nil(a.RPC("Catalog.Register", args, &out))
assert.Nil(t, a.RPC("Catalog.Register", args, &out))
req, _ := http.NewRequest("GET", fmt.Sprintf(
"/v1/catalog/connect/%s", args.Service.Proxy.DestinationServiceName), nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogConnectServiceNodes(resp, req)
assert.Nil(err)
assert.Nil(t, err)
assertIndex(t, resp)
nodes := obj.(structs.ServiceNodes)
assert.Len(nodes, 1)
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(args.Service.Address, nodes[0].ServiceAddress)
assert.Equal(args.Service.Proxy, nodes[0].ServiceProxy)
assert.Len(t, nodes, 1)
assert.Equal(t, structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(t, args.Service.Address, nodes[0].ServiceAddress)
assert.Equal(t, args.Service.Proxy, nodes[0].ServiceProxy)
}
func TestCatalogConnectServiceNodes_Filter(t *testing.T) {
@ -1307,7 +1302,6 @@ func TestCatalogNodeServices_ConnectProxy(t *testing.T) {
t.Parallel()
assert := assert.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -1315,19 +1309,19 @@ func TestCatalogNodeServices_ConnectProxy(t *testing.T) {
// Register
args := structs.TestRegisterRequestProxy(t)
var out struct{}
assert.Nil(a.RPC("Catalog.Register", args, &out))
assert.Nil(t, a.RPC("Catalog.Register", args, &out))
req, _ := http.NewRequest("GET", fmt.Sprintf(
"/v1/catalog/node/%s", args.Node), nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogNodeServices(resp, req)
assert.Nil(err)
assert.Nil(t, err)
assertIndex(t, resp)
ns := obj.(*structs.NodeServices)
assert.Len(ns.Services, 1)
assert.Len(t, ns.Services, 1)
v := ns.Services[args.Service.Service]
assert.Equal(structs.ServiceKindConnectProxy, v.Kind)
assert.Equal(t, structs.ServiceKindConnectProxy, v.Kind)
}
func TestCatalogNodeServices_WanTranslation(t *testing.T) {

View File

@ -1631,7 +1631,7 @@ func (b *builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition {
meta := make(map[string]string)
if err := structs.ValidateServiceMetadata(kind, v.Meta, false); err != nil {
b.err = multierror.Append(fmt.Errorf("invalid meta for service %s: %v", stringVal(v.Name), err))
b.err = multierror.Append(b.err, fmt.Errorf("invalid meta for service %s: %v", stringVal(v.Name), err))
} else {
meta = v.Meta
}
@ -1646,13 +1646,13 @@ func (b *builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition {
}
if err := structs.ValidateWeights(serviceWeights); err != nil {
b.err = multierror.Append(fmt.Errorf("Invalid weight definition for service %s: %s", stringVal(v.Name), err))
b.err = multierror.Append(b.err, fmt.Errorf("Invalid weight definition for service %s: %s", stringVal(v.Name), err))
}
if (v.Port != nil || v.Address != nil) && (v.SocketPath != nil) {
b.err = multierror.Append(
b.err = multierror.Append(b.err,
fmt.Errorf("service %s cannot have both socket path %s and address/port",
stringVal(v.Name), stringVal(v.SocketPath)), b.err)
stringVal(v.Name), stringVal(v.SocketPath)))
}
return &structs.ServiceDefinition{
@ -1890,7 +1890,7 @@ func (b *builder) durationValWithDefault(name string, v *string, defaultVal time
}
d, err := time.ParseDuration(*v)
if err != nil {
b.err = multierror.Append(fmt.Errorf("%s: invalid duration: %q: %s", name, *v, err))
b.err = multierror.Append(b.err, fmt.Errorf("%s: invalid duration: %q: %s", name, *v, err))
}
return d
}
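Every change in this builder.go hunk is the same one-line fix: `multierror.Append` was called without `b.err` as its first argument, so each new validation error replaced the accumulated list instead of extending it, and only the last error survived. A small standalone sketch of the difference, using hashicorp/go-multierror's `Append(err, errs...)`:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var accumulated error

	// Buggy pattern: each call builds a brand-new multierror containing only
	// the latest error, so the earlier error is silently dropped.
	accumulated = multierror.Append(fmt.Errorf("first problem"))
	accumulated = multierror.Append(fmt.Errorf("second problem"))
	fmt.Println(accumulated) // reports only "second problem"

	// Fixed pattern from this commit: pass the running error as the first
	// argument so every validation error is preserved.
	accumulated = nil
	accumulated = multierror.Append(accumulated, fmt.Errorf("first problem"))
	accumulated = multierror.Append(accumulated, fmt.Errorf("second problem"))
	fmt.Println(accumulated) // reports "2 errors occurred: ..."
}
```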

View File

@ -291,3 +291,39 @@ func TestLoad_EmptyClientAddr(t *testing.T) {
})
}
}
func TestBuilder_DurationVal_InvalidDuration(t *testing.T) {
b := builder{}
badDuration1 := "not-a-duration"
badDuration2 := "also-not"
b.durationVal("field1", &badDuration1)
b.durationVal("field1", &badDuration2)
require.Error(t, b.err)
require.Contains(t, b.err.Error(), "2 errors")
require.Contains(t, b.err.Error(), badDuration1)
require.Contains(t, b.err.Error(), badDuration2)
}
func TestBuilder_ServiceVal_MultiError(t *testing.T) {
b := builder{}
b.serviceVal(&ServiceDefinition{
Meta: map[string]string{"": "empty-key"},
Port: intPtr(12345),
SocketPath: strPtr("/var/run/socket.sock"),
Checks: []CheckDefinition{
{Interval: strPtr("bad-interval")},
},
Weights: &ServiceWeights{Passing: intPtr(-1)},
})
require.Error(t, b.err)
require.Contains(t, b.err.Error(), "4 errors")
require.Contains(t, b.err.Error(), "bad-interval")
require.Contains(t, b.err.Error(), "Key cannot be blank")
require.Contains(t, b.err.Error(), "Invalid weight")
require.Contains(t, b.err.Error(), "cannot have both socket path")
}
func intPtr(v int) *int {
return &v
}

View File

@ -88,7 +88,6 @@ enable_acl_replication = true
func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) {
t.Run("top-level fields", func(t *testing.T) {
require := require.New(t)
opts := LoadOpts{
HCL: []string{`
@ -101,21 +100,20 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) {
patchLoadOptsShims(&opts)
result, err := Load(opts)
require.NoError(err)
require.NoError(t, err)
expectWarns := []string{
deprecationWarning("acl_master_token", "acl.tokens.initial_management"),
deprecationWarning("acl_agent_master_token", "acl.tokens.agent_recovery"),
}
require.ElementsMatch(expectWarns, result.Warnings)
require.ElementsMatch(t, expectWarns, result.Warnings)
rt := result.RuntimeConfig
require.Equal("token1", rt.ACLInitialManagementToken)
require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken)
require.Equal(t, "token1", rt.ACLInitialManagementToken)
require.Equal(t, "token2", rt.ACLTokens.ACLAgentRecoveryToken)
})
t.Run("embedded in tokens struct", func(t *testing.T) {
require := require.New(t)
opts := LoadOpts{
HCL: []string{`
@ -132,21 +130,20 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) {
patchLoadOptsShims(&opts)
result, err := Load(opts)
require.NoError(err)
require.NoError(t, err)
expectWarns := []string{
deprecationWarning("acl.tokens.master", "acl.tokens.initial_management"),
deprecationWarning("acl.tokens.agent_master", "acl.tokens.agent_recovery"),
}
require.ElementsMatch(expectWarns, result.Warnings)
require.ElementsMatch(t, expectWarns, result.Warnings)
rt := result.RuntimeConfig
require.Equal("token1", rt.ACLInitialManagementToken)
require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken)
require.Equal(t, "token1", rt.ACLInitialManagementToken)
require.Equal(t, "token2", rt.ACLTokens.ACLAgentRecoveryToken)
})
t.Run("both", func(t *testing.T) {
require := require.New(t)
opts := LoadOpts{
HCL: []string{`
@ -166,10 +163,10 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) {
patchLoadOptsShims(&opts)
result, err := Load(opts)
require.NoError(err)
require.NoError(t, err)
rt := result.RuntimeConfig
require.Equal("token3", rt.ACLInitialManagementToken)
require.Equal("token4", rt.ACLTokens.ACLAgentRecoveryToken)
require.Equal(t, "token3", rt.ACLInitialManagementToken)
require.Equal(t, "token4", rt.ACLTokens.ACLAgentRecoveryToken)
})
}

View File

@ -32,7 +32,11 @@ func (s *HTTPHandlers) configGet(resp http.ResponseWriter, req *http.Request) (i
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
pathArgs := strings.SplitN(strings.TrimPrefix(req.URL.Path, "/v1/config/"), "/", 2)
kindAndName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/config/")
if err != nil {
return nil, err
}
pathArgs := strings.SplitN(kindAndName, "/", 2)
switch len(pathArgs) {
case 2:
@ -79,7 +83,11 @@ func (s *HTTPHandlers) configDelete(resp http.ResponseWriter, req *http.Request)
var args structs.ConfigEntryRequest
s.parseDC(req, &args.Datacenter)
s.parseToken(req, &args.Token)
pathArgs := strings.SplitN(strings.TrimPrefix(req.URL.Path, "/v1/config/"), "/", 2)
kindAndName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/config/")
if err != nil {
return nil, err
}
pathArgs := strings.SplitN(kindAndName, "/", 2)
if len(pathArgs) != 2 {
resp.WriteHeader(http.StatusNotFound)

View File

@ -149,7 +149,6 @@ func TestConfig_Delete(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -171,7 +170,7 @@ func TestConfig_Delete(t *testing.T) {
}
for _, req := range reqs {
out := false
require.NoError(a.RPC("ConfigEntry.Apply", &req, &out))
require.NoError(t, a.RPC("ConfigEntry.Apply", &req, &out))
}
// Delete an entry.
@ -179,7 +178,7 @@ func TestConfig_Delete(t *testing.T) {
req, _ := http.NewRequest("DELETE", "/v1/config/service-defaults/bar", nil)
resp := httptest.NewRecorder()
_, err := a.srv.Config(resp, req)
require.NoError(err)
require.NoError(t, err)
}
// Get the remaining entry.
{
@ -188,11 +187,11 @@ func TestConfig_Delete(t *testing.T) {
Datacenter: "dc1",
}
var out structs.IndexedConfigEntries
require.NoError(a.RPC("ConfigEntry.List", &args, &out))
require.Equal(structs.ServiceDefaults, out.Kind)
require.Len(out.Entries, 1)
require.NoError(t, a.RPC("ConfigEntry.List", &args, &out))
require.Equal(t, structs.ServiceDefaults, out.Kind)
require.Len(t, out.Entries, 1)
entry := out.Entries[0].(*structs.ServiceConfigEntry)
require.Equal(entry.Name, "foo")
require.Equal(t, entry.Name, "foo")
}
}
@ -202,8 +201,6 @@ func TestConfig_Delete_CAS(t *testing.T) {
}
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -214,20 +211,20 @@ func TestConfig_Delete_CAS(t *testing.T) {
Name: "foo",
}
var created bool
require.NoError(a.RPC("ConfigEntry.Apply", &structs.ConfigEntryRequest{
require.NoError(t, a.RPC("ConfigEntry.Apply", &structs.ConfigEntryRequest{
Datacenter: "dc1",
Entry: entry,
}, &created))
require.True(created)
require.True(t, created)
// Read it back to get its ModifyIndex.
var out structs.ConfigEntryResponse
require.NoError(a.RPC("ConfigEntry.Get", &structs.ConfigEntryQuery{
require.NoError(t, a.RPC("ConfigEntry.Get", &structs.ConfigEntryQuery{
Datacenter: "dc1",
Kind: entry.Kind,
Name: entry.Name,
}, &out))
require.NotNil(out.Entry)
require.NotNil(t, out.Entry)
modifyIndex := out.Entry.GetRaftIndex().ModifyIndex
@ -238,20 +235,20 @@ func TestConfig_Delete_CAS(t *testing.T) {
nil,
)
rawRsp, err := a.srv.Config(httptest.NewRecorder(), req)
require.NoError(err)
require.NoError(t, err)
deleted, isBool := rawRsp.(bool)
require.True(isBool, "response should be a boolean")
require.False(deleted, "entry should not have been deleted")
require.True(t, isBool, "response should be a boolean")
require.False(t, deleted, "entry should not have been deleted")
// Verify it was not deleted.
var out structs.ConfigEntryResponse
require.NoError(a.RPC("ConfigEntry.Get", &structs.ConfigEntryQuery{
require.NoError(t, a.RPC("ConfigEntry.Get", &structs.ConfigEntryQuery{
Datacenter: "dc1",
Kind: entry.Kind,
Name: entry.Name,
}, &out))
require.NotNil(out.Entry)
require.NotNil(t, out.Entry)
})
t.Run("attempt to delete with a valid index", func(t *testing.T) {
@ -261,20 +258,20 @@ func TestConfig_Delete_CAS(t *testing.T) {
nil,
)
rawRsp, err := a.srv.Config(httptest.NewRecorder(), req)
require.NoError(err)
require.NoError(t, err)
deleted, isBool := rawRsp.(bool)
require.True(isBool, "response should be a boolean")
require.True(deleted, "entry should have been deleted")
require.True(t, isBool, "response should be a boolean")
require.True(t, deleted, "entry should have been deleted")
// Verify it was deleted.
var out structs.ConfigEntryResponse
require.NoError(a.RPC("ConfigEntry.Get", &structs.ConfigEntryQuery{
require.NoError(t, a.RPC("ConfigEntry.Get", &structs.ConfigEntryQuery{
Datacenter: "dc1",
Kind: entry.Kind,
Name: entry.Name,
}, &out))
require.Nil(out.Entry)
require.Nil(t, out.Entry)
})
}
@ -285,7 +282,6 @@ func TestConfig_Apply(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -301,7 +297,7 @@ func TestConfig_Apply(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/config", body)
resp := httptest.NewRecorder()
_, err := a.srv.ConfigApply(resp, req)
require.NoError(err)
require.NoError(t, err)
if resp.Code != 200 {
t.Fatalf(resp.Body.String())
}
@ -314,10 +310,10 @@ func TestConfig_Apply(t *testing.T) {
Datacenter: "dc1",
}
var out structs.ConfigEntryResponse
require.NoError(a.RPC("ConfigEntry.Get", &args, &out))
require.NotNil(out.Entry)
require.NoError(t, a.RPC("ConfigEntry.Get", &args, &out))
require.NotNil(t, out.Entry)
entry := out.Entry.(*structs.ServiceConfigEntry)
require.Equal(entry.Name, "foo")
require.Equal(t, entry.Name, "foo")
}
}
@ -503,7 +499,6 @@ func TestConfig_Apply_CAS(t *testing.T) {
t.Parallel()
require := require.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -519,7 +514,7 @@ func TestConfig_Apply_CAS(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/config", body)
resp := httptest.NewRecorder()
_, err := a.srv.ConfigApply(resp, req)
require.NoError(err)
require.NoError(t, err)
if resp.Code != 200 {
t.Fatalf(resp.Body.String())
}
@ -532,8 +527,8 @@ func TestConfig_Apply_CAS(t *testing.T) {
}
out := &structs.ConfigEntryResponse{}
require.NoError(a.RPC("ConfigEntry.Get", &args, out))
require.NotNil(out.Entry)
require.NoError(t, a.RPC("ConfigEntry.Get", &args, out))
require.NotNil(t, out.Entry)
entry := out.Entry.(*structs.ServiceConfigEntry)
body = bytes.NewBuffer([]byte(`
@ -546,11 +541,11 @@ func TestConfig_Apply_CAS(t *testing.T) {
req, _ = http.NewRequest("PUT", "/v1/config?cas=0", body)
resp = httptest.NewRecorder()
writtenRaw, err := a.srv.ConfigApply(resp, req)
require.NoError(err)
require.NoError(t, err)
written, ok := writtenRaw.(bool)
require.True(ok)
require.False(written)
require.EqualValues(200, resp.Code, resp.Body.String())
require.True(t, ok)
require.False(t, written)
require.EqualValues(t, 200, resp.Code, resp.Body.String())
body = bytes.NewBuffer([]byte(`
{
@ -562,11 +557,11 @@ func TestConfig_Apply_CAS(t *testing.T) {
req, _ = http.NewRequest("PUT", fmt.Sprintf("/v1/config?cas=%d", entry.GetRaftIndex().ModifyIndex), body)
resp = httptest.NewRecorder()
writtenRaw, err = a.srv.ConfigApply(resp, req)
require.NoError(err)
require.NoError(t, err)
written, ok = writtenRaw.(bool)
require.True(ok)
require.True(written)
require.EqualValues(200, resp.Code, resp.Body.String())
require.True(t, ok)
require.True(t, written)
require.EqualValues(t, 200, resp.Code, resp.Body.String())
// Get the remaining entry.
args = structs.ConfigEntryQuery{
@ -576,10 +571,10 @@ func TestConfig_Apply_CAS(t *testing.T) {
}
out = &structs.ConfigEntryResponse{}
require.NoError(a.RPC("ConfigEntry.Get", &args, out))
require.NotNil(out.Entry)
require.NoError(t, a.RPC("ConfigEntry.Get", &args, out))
require.NotNil(t, out.Entry)
newEntry := out.Entry.(*structs.ServiceConfigEntry)
require.NotEqual(entry.GetRaftIndex(), newEntry.GetRaftIndex())
require.NotEqual(t, entry.GetRaftIndex(), newEntry.GetRaftIndex())
}
func TestConfig_Apply_Decoding(t *testing.T) {

View File

@ -38,7 +38,6 @@ func TestAWSBootstrapAndSignPrimary(t *testing.T) {
for _, tc := range KeyTestCases {
tc := tc
t.Run(tc.Desc, func(t *testing.T) {
require := require.New(t)
cfg := map[string]interface{}{
"PrivateKeyType": tc.KeyType,
"PrivateKeyBits": tc.KeyBits,
@ -48,33 +47,33 @@ func TestAWSBootstrapAndSignPrimary(t *testing.T) {
defer provider.Cleanup(true, nil)
// Generate the root
require.NoError(provider.GenerateRoot())
require.NoError(t, provider.GenerateRoot())
// Fetch Active Root
rootPEM, err := provider.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
// Generate Intermediate (not actually needed for this provider for now
// but this simulates the calls in Server.initializeRoot).
interPEM, err := provider.GenerateIntermediate()
require.NoError(err)
require.NoError(t, err)
// Should be the same for now
require.Equal(rootPEM, interPEM)
require.Equal(t, rootPEM, interPEM)
// Ensure they use the right key type
rootCert, err := connect.ParseCert(rootPEM)
require.NoError(err)
require.NoError(t, err)
keyType, keyBits, err := connect.KeyInfoFromCert(rootCert)
require.NoError(err)
require.Equal(tc.KeyType, keyType)
require.Equal(tc.KeyBits, keyBits)
require.NoError(t, err)
require.Equal(t, tc.KeyType, keyType)
require.Equal(t, tc.KeyBits, keyBits)
// Ensure that the root cert ttl is within the configured value
// computation is similar to how we are passing the TTL thru the aws client
expectedTime := time.Now().AddDate(0, 0, int(8761*60*time.Minute/day)).UTC()
require.WithinDuration(expectedTime, rootCert.NotAfter, 10*time.Minute, "expected parsed cert ttl to be the same as the value configured")
require.WithinDuration(t, expectedTime, rootCert.NotAfter, 10*time.Minute, "expected parsed cert ttl to be the same as the value configured")
// Sign a leaf with it
testSignAndValidate(t, provider, rootPEM, nil)

View File

@ -78,26 +78,25 @@ func requireNotEncoded(t *testing.T, v []byte) {
func TestConsulCAProvider_Bootstrap(t *testing.T) {
t.Parallel()
require := require.New(t)
conf := testConsulCAConfig()
delegate := newMockDelegate(t, conf)
provider := TestConsulProvider(t, delegate)
require.NoError(provider.Configure(testProviderConfig(conf)))
require.NoError(provider.GenerateRoot())
require.NoError(t, provider.Configure(testProviderConfig(conf)))
require.NoError(t, provider.GenerateRoot())
root, err := provider.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
// Intermediate should be the same cert.
inter, err := provider.ActiveIntermediate()
require.NoError(err)
require.Equal(root, inter)
require.NoError(t, err)
require.Equal(t, root, inter)
// Should be a valid cert
parsed, err := connect.ParseCert(root)
require.NoError(err)
require.Equal(parsed.URIs[0].String(), fmt.Sprintf("spiffe://%s.consul", conf.ClusterID))
require.NoError(t, err)
require.Equal(t, parsed.URIs[0].String(), fmt.Sprintf("spiffe://%s.consul", conf.ClusterID))
requireNotEncoded(t, parsed.SubjectKeyId)
requireNotEncoded(t, parsed.AuthorityKeyId)
@ -105,16 +104,15 @@ func TestConsulCAProvider_Bootstrap(t *testing.T) {
// notice that we allow a margin of "error" of 10 minutes between the
// generateCA() creation and this check
defaultRootCertTTL, err := time.ParseDuration(structs.DefaultRootCertTTL)
require.NoError(err)
require.NoError(t, err)
expectedNotAfter := time.Now().Add(defaultRootCertTTL).UTC()
require.WithinDuration(expectedNotAfter, parsed.NotAfter, 10*time.Minute, "expected parsed cert ttl to be the same as the value configured")
require.WithinDuration(t, expectedNotAfter, parsed.NotAfter, 10*time.Minute, "expected parsed cert ttl to be the same as the value configured")
}
func TestConsulCAProvider_Bootstrap_WithCert(t *testing.T) {
t.Parallel()
// Make sure setting a custom private key/root cert works.
require := require.New(t)
rootCA := connect.TestCAWithTTL(t, nil, 5*time.Hour)
conf := testConsulCAConfig()
conf.Config = map[string]interface{}{
@ -124,24 +122,24 @@ func TestConsulCAProvider_Bootstrap_WithCert(t *testing.T) {
delegate := newMockDelegate(t, conf)
provider := TestConsulProvider(t, delegate)
require.NoError(provider.Configure(testProviderConfig(conf)))
require.NoError(provider.GenerateRoot())
require.NoError(t, provider.Configure(testProviderConfig(conf)))
require.NoError(t, provider.GenerateRoot())
root, err := provider.ActiveRoot()
require.NoError(err)
require.Equal(root, rootCA.RootCert)
require.NoError(t, err)
require.Equal(t, root, rootCA.RootCert)
// Should be a valid cert
parsed, err := connect.ParseCert(root)
require.NoError(err)
require.NoError(t, err)
// test that the default root cert ttl was not applied to the provided cert
defaultRootCertTTL, err := time.ParseDuration(structs.DefaultRootCertTTL)
require.NoError(err)
require.NoError(t, err)
defaultNotAfter := time.Now().Add(defaultRootCertTTL).UTC()
// we can't compare given the "delta" between the time the cert is generated
// and when we start the test; so just compare the years for now, which should differ
require.NotEqualf(defaultNotAfter.Year(), parsed.NotAfter.Year(), "parsed cert ttl expected to be different from default root cert ttl")
require.NotEqualf(t, defaultNotAfter.Year(), parsed.NotAfter.Year(), "parsed cert ttl expected to be different from default root cert ttl")
}
func TestConsulCAProvider_SignLeaf(t *testing.T) {
@ -154,7 +152,6 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) {
for _, tc := range KeyTestCases {
tc := tc
t.Run(tc.Desc, func(t *testing.T) {
require := require.New(t)
conf := testConsulCAConfig()
conf.Config["LeafCertTTL"] = "1h"
conf.Config["PrivateKeyType"] = tc.KeyType
@ -162,8 +159,8 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) {
delegate := newMockDelegate(t, conf)
provider := TestConsulProvider(t, delegate)
require.NoError(provider.Configure(testProviderConfig(conf)))
require.NoError(provider.GenerateRoot())
require.NoError(t, provider.Configure(testProviderConfig(conf)))
require.NoError(t, provider.GenerateRoot())
spiffeService := &connect.SpiffeIDService{
Host: connect.TestClusterID + ".consul",
@ -177,26 +174,26 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) {
raw, _ := connect.TestCSR(t, spiffeService)
csr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
cert, err := provider.Sign(csr)
require.NoError(err)
require.NoError(t, err)
requireTrailingNewline(t, cert)
parsed, err := connect.ParseCert(cert)
require.NoError(err)
require.Equal(spiffeService.URI(), parsed.URIs[0])
require.Empty(parsed.Subject.CommonName)
require.Equal(uint64(3), parsed.SerialNumber.Uint64())
require.NoError(t, err)
require.Equal(t, spiffeService.URI(), parsed.URIs[0])
require.Empty(t, parsed.Subject.CommonName)
require.Equal(t, uint64(3), parsed.SerialNumber.Uint64())
subjectKeyID, err := connect.KeyId(csr.PublicKey)
require.NoError(err)
require.Equal(subjectKeyID, parsed.SubjectKeyId)
require.NoError(t, err)
require.Equal(t, subjectKeyID, parsed.SubjectKeyId)
requireNotEncoded(t, parsed.SubjectKeyId)
requireNotEncoded(t, parsed.AuthorityKeyId)
// Ensure the cert is valid now and expires within the correct limit.
now := time.Now()
require.True(parsed.NotAfter.Sub(now) < time.Hour)
require.True(parsed.NotBefore.Before(now))
require.True(t, parsed.NotAfter.Sub(now) < time.Hour)
require.True(t, parsed.NotBefore.Before(now))
}
// Generate a new cert for another service and make sure
@ -206,22 +203,22 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) {
raw, _ := connect.TestCSR(t, spiffeService)
csr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
cert, err := provider.Sign(csr)
require.NoError(err)
require.NoError(t, err)
parsed, err := connect.ParseCert(cert)
require.NoError(err)
require.Equal(spiffeService.URI(), parsed.URIs[0])
require.Empty(parsed.Subject.CommonName)
require.Equal(uint64(4), parsed.SerialNumber.Uint64())
require.NoError(t, err)
require.Equal(t, spiffeService.URI(), parsed.URIs[0])
require.Empty(t, parsed.Subject.CommonName)
require.Equal(t, uint64(4), parsed.SerialNumber.Uint64())
requireNotEncoded(t, parsed.SubjectKeyId)
requireNotEncoded(t, parsed.AuthorityKeyId)
// Ensure the cert is valid now and expires within the correct limit.
require.True(time.Until(parsed.NotAfter) < 3*24*time.Hour)
require.True(parsed.NotBefore.Before(time.Now()))
require.True(t, time.Until(parsed.NotAfter) < 3*24*time.Hour)
require.True(t, parsed.NotBefore.Before(time.Now()))
}
spiffeAgent := &connect.SpiffeIDAgent{
@ -234,23 +231,23 @@ func TestConsulCAProvider_SignLeaf(t *testing.T) {
raw, _ := connect.TestCSR(t, spiffeAgent)
csr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
cert, err := provider.Sign(csr)
require.NoError(err)
require.NoError(t, err)
parsed, err := connect.ParseCert(cert)
require.NoError(err)
require.Equal(spiffeAgent.URI(), parsed.URIs[0])
require.Empty(parsed.Subject.CommonName)
require.Equal(uint64(5), parsed.SerialNumber.Uint64())
require.NoError(t, err)
require.Equal(t, spiffeAgent.URI(), parsed.URIs[0])
require.Empty(t, parsed.Subject.CommonName)
require.Equal(t, uint64(5), parsed.SerialNumber.Uint64())
requireNotEncoded(t, parsed.SubjectKeyId)
requireNotEncoded(t, parsed.AuthorityKeyId)
// Ensure the cert is valid now and expires within the correct limit.
now := time.Now()
require.True(parsed.NotAfter.Sub(now) < time.Hour)
require.True(parsed.NotBefore.Before(now))
require.True(t, parsed.NotAfter.Sub(now) < time.Hour)
require.True(t, parsed.NotBefore.Before(now))
}
})
}
@ -268,15 +265,14 @@ func TestConsulCAProvider_CrossSignCA(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.Desc, func(t *testing.T) {
require := require.New(t)
conf1 := testConsulCAConfig()
delegate1 := newMockDelegate(t, conf1)
provider1 := TestConsulProvider(t, delegate1)
conf1.Config["PrivateKeyType"] = tc.SigningKeyType
conf1.Config["PrivateKeyBits"] = tc.SigningKeyBits
require.NoError(provider1.Configure(testProviderConfig(conf1)))
require.NoError(provider1.GenerateRoot())
require.NoError(t, provider1.Configure(testProviderConfig(conf1)))
require.NoError(t, provider1.GenerateRoot())
conf2 := testConsulCAConfig()
conf2.CreateIndex = 10
@ -284,8 +280,8 @@ func TestConsulCAProvider_CrossSignCA(t *testing.T) {
provider2 := TestConsulProvider(t, delegate2)
conf2.Config["PrivateKeyType"] = tc.CSRKeyType
conf2.Config["PrivateKeyBits"] = tc.CSRKeyBits
require.NoError(provider2.Configure(testProviderConfig(conf2)))
require.NoError(provider2.GenerateRoot())
require.NoError(t, provider2.Configure(testProviderConfig(conf2)))
require.NoError(t, provider2.GenerateRoot())
testCrossSignProviders(t, provider1, provider2)
})
@ -293,52 +289,51 @@ func TestConsulCAProvider_CrossSignCA(t *testing.T) {
}
func testCrossSignProviders(t *testing.T, provider1, provider2 Provider) {
require := require.New(t)
// Get the root from the new provider to be cross-signed.
newRootPEM, err := provider2.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
newRoot, err := connect.ParseCert(newRootPEM)
require.NoError(err)
require.NoError(t, err)
oldSubject := newRoot.Subject.CommonName
requireNotEncoded(t, newRoot.SubjectKeyId)
requireNotEncoded(t, newRoot.AuthorityKeyId)
newInterPEM, err := provider2.ActiveIntermediate()
require.NoError(err)
require.NoError(t, err)
newIntermediate, err := connect.ParseCert(newInterPEM)
require.NoError(err)
require.NoError(t, err)
requireNotEncoded(t, newIntermediate.SubjectKeyId)
requireNotEncoded(t, newIntermediate.AuthorityKeyId)
// Have provider1 cross sign our new root cert.
xcPEM, err := provider1.CrossSignCA(newRoot)
require.NoError(err)
require.NoError(t, err)
xc, err := connect.ParseCert(xcPEM)
require.NoError(err)
require.NoError(t, err)
requireNotEncoded(t, xc.SubjectKeyId)
requireNotEncoded(t, xc.AuthorityKeyId)
oldRootPEM, err := provider1.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
oldRoot, err := connect.ParseCert(oldRootPEM)
require.NoError(err)
require.NoError(t, err)
requireNotEncoded(t, oldRoot.SubjectKeyId)
requireNotEncoded(t, oldRoot.AuthorityKeyId)
// AuthorityKeyID should now be the signing root's, SubjectKeyId should be kept.
require.Equal(oldRoot.SubjectKeyId, xc.AuthorityKeyId,
require.Equal(t, oldRoot.SubjectKeyId, xc.AuthorityKeyId,
"newSKID=%x\nnewAKID=%x\noldSKID=%x\noldAKID=%x\nxcSKID=%x\nxcAKID=%x",
newRoot.SubjectKeyId, newRoot.AuthorityKeyId,
oldRoot.SubjectKeyId, oldRoot.AuthorityKeyId,
xc.SubjectKeyId, xc.AuthorityKeyId)
require.Equal(newRoot.SubjectKeyId, xc.SubjectKeyId)
require.Equal(t, newRoot.SubjectKeyId, xc.SubjectKeyId)
// Subject name should not have changed.
require.Equal(oldSubject, xc.Subject.CommonName)
require.Equal(t, oldSubject, xc.Subject.CommonName)
// Issuer should be the signing root.
require.Equal(oldRoot.Issuer.CommonName, xc.Issuer.CommonName)
require.Equal(t, oldRoot.Issuer.CommonName, xc.Issuer.CommonName)
// Get a leaf cert so we can verify against the cross-signed cert.
spiffeService := &connect.SpiffeIDService{
@ -350,13 +345,13 @@ func testCrossSignProviders(t *testing.T, provider1, provider2 Provider) {
raw, _ := connect.TestCSR(t, spiffeService)
leafCsr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
leafPEM, err := provider2.Sign(leafCsr)
require.NoError(err)
require.NoError(t, err)
cert, err := connect.ParseCert(leafPEM)
require.NoError(err)
require.NoError(t, err)
requireNotEncoded(t, cert.SubjectKeyId)
requireNotEncoded(t, cert.AuthorityKeyId)
@ -374,7 +369,7 @@ func testCrossSignProviders(t *testing.T, provider1, provider2 Provider) {
Intermediates: intermediatePool,
Roots: rootPool,
})
require.NoError(err)
require.NoError(t, err)
}
}
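The chain check at the end of testCrossSignProviders (the `Intermediates`/`Roots` pools above) is standard `crypto/x509` verification: trust only the old root, present the cross-signed certificate as an intermediate, and confirm that a leaf signed by the new CA still chains up. A minimal sketch of that pattern; the PEM inputs are assumed to come from the providers, as in the test:

```go
package example

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// verifyLeafViaCrossSign sketches the verification done at the end of
// testCrossSignProviders: trust only the old root, offer the cross-signed
// cert as an intermediate, and check that the new CA's leaf chains up.
func verifyLeafViaCrossSign(oldRootPEM, crossSignedPEM, leafPEM string) error {
	rootPool := x509.NewCertPool()
	if !rootPool.AppendCertsFromPEM([]byte(oldRootPEM)) {
		return fmt.Errorf("failed to add old root to pool")
	}

	intermediatePool := x509.NewCertPool()
	if !intermediatePool.AppendCertsFromPEM([]byte(crossSignedPEM)) {
		return fmt.Errorf("failed to add cross-signed cert to pool")
	}

	block, _ := pem.Decode([]byte(leafPEM))
	if block == nil {
		return fmt.Errorf("failed to decode leaf PEM")
	}
	leaf, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}

	_, err = leaf.Verify(x509.VerifyOptions{
		Roots:         rootPool,
		Intermediates: intermediatePool,
	})
	return err
}
```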
@ -390,15 +385,14 @@ func TestConsulProvider_SignIntermediate(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.Desc, func(t *testing.T) {
require := require.New(t)
conf1 := testConsulCAConfig()
delegate1 := newMockDelegate(t, conf1)
provider1 := TestConsulProvider(t, delegate1)
conf1.Config["PrivateKeyType"] = tc.SigningKeyType
conf1.Config["PrivateKeyBits"] = tc.SigningKeyBits
require.NoError(provider1.Configure(testProviderConfig(conf1)))
require.NoError(provider1.GenerateRoot())
require.NoError(t, provider1.Configure(testProviderConfig(conf1)))
require.NoError(t, provider1.GenerateRoot())
conf2 := testConsulCAConfig()
conf2.CreateIndex = 10
@ -409,7 +403,7 @@ func TestConsulProvider_SignIntermediate(t *testing.T) {
cfg := testProviderConfig(conf2)
cfg.IsPrimary = false
cfg.Datacenter = "dc2"
require.NoError(provider2.Configure(cfg))
require.NoError(t, provider2.Configure(cfg))
testSignIntermediateCrossDC(t, provider1, provider2)
})
@ -418,22 +412,21 @@ func TestConsulProvider_SignIntermediate(t *testing.T) {
}
func testSignIntermediateCrossDC(t *testing.T, provider1, provider2 Provider) {
require := require.New(t)
// Get the intermediate CSR from provider2.
csrPEM, err := provider2.GenerateIntermediateCSR()
require.NoError(err)
require.NoError(t, err)
csr, err := connect.ParseCSR(csrPEM)
require.NoError(err)
require.NoError(t, err)
// Sign the CSR with provider1.
intermediatePEM, err := provider1.SignIntermediate(csr)
require.NoError(err)
require.NoError(t, err)
rootPEM, err := provider1.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
// Give the new intermediate to provider2 to use.
require.NoError(provider2.SetIntermediate(intermediatePEM, rootPEM))
require.NoError(t, provider2.SetIntermediate(intermediatePEM, rootPEM))
// Have provider2 sign a leaf cert and make sure the chain is correct.
spiffeService := &connect.SpiffeIDService{
@ -445,13 +438,13 @@ func testSignIntermediateCrossDC(t *testing.T, provider1, provider2 Provider) {
raw, _ := connect.TestCSR(t, spiffeService)
leafCsr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
leafPEM, err := provider2.Sign(leafCsr)
require.NoError(err)
require.NoError(t, err)
cert, err := connect.ParseCert(leafPEM)
require.NoError(err)
require.NoError(t, err)
requireNotEncoded(t, cert.SubjectKeyId)
requireNotEncoded(t, cert.AuthorityKeyId)
@ -466,7 +459,7 @@ func testSignIntermediateCrossDC(t *testing.T, provider1, provider2 Provider) {
Intermediates: intermediatePool,
Roots: rootPool,
})
require.NoError(err)
require.NoError(t, err)
}
func TestConsulCAProvider_MigrateOldID(t *testing.T) {

View File

@ -116,13 +116,12 @@ func TestVaultCAProvider_VaultTLSConfig(t *testing.T) {
TLSSkipVerify: true,
}
tlsConfig := vaultTLSConfig(config)
require := require.New(t)
require.Equal(config.CAFile, tlsConfig.CACert)
require.Equal(config.CAPath, tlsConfig.CAPath)
require.Equal(config.CertFile, tlsConfig.ClientCert)
require.Equal(config.KeyFile, tlsConfig.ClientKey)
require.Equal(config.TLSServerName, tlsConfig.TLSServerName)
require.Equal(config.TLSSkipVerify, tlsConfig.Insecure)
require.Equal(t, config.CAFile, tlsConfig.CACert)
require.Equal(t, config.CAPath, tlsConfig.CAPath)
require.Equal(t, config.CertFile, tlsConfig.ClientCert)
require.Equal(t, config.KeyFile, tlsConfig.ClientKey)
require.Equal(t, config.TLSServerName, tlsConfig.TLSServerName)
require.Equal(t, config.TLSSkipVerify, tlsConfig.Insecure)
}
func TestVaultCAProvider_Configure(t *testing.T) {
@ -171,11 +170,10 @@ func TestVaultCAProvider_SecondaryActiveIntermediate(t *testing.T) {
provider, testVault := testVaultProviderWithConfig(t, false, nil)
defer testVault.Stop()
require := require.New(t)
cert, err := provider.ActiveIntermediate()
require.Empty(cert)
require.NoError(err)
require.Empty(t, cert)
require.NoError(t, err)
}
func TestVaultCAProvider_RenewToken(t *testing.T) {
@ -231,8 +229,6 @@ func TestVaultCAProvider_Bootstrap(t *testing.T) {
defer testvault2.Stop()
client2 := testvault2.client
require := require.New(t)
cases := []struct {
certFunc func() (string, error)
backendPath string
@ -264,28 +260,28 @@ func TestVaultCAProvider_Bootstrap(t *testing.T) {
provider := tc.provider
client := tc.client
cert, err := tc.certFunc()
require.NoError(err)
require.NoError(t, err)
req := client.NewRequest("GET", "/v1/"+tc.backendPath+"ca/pem")
resp, err := client.RawRequest(req)
require.NoError(err)
require.NoError(t, err)
bytes, err := ioutil.ReadAll(resp.Body)
require.NoError(err)
require.Equal(cert, string(bytes)+"\n")
require.NoError(t, err)
require.Equal(t, cert, string(bytes)+"\n")
// Should be a valid CA cert
parsed, err := connect.ParseCert(cert)
require.NoError(err)
require.True(parsed.IsCA)
require.Len(parsed.URIs, 1)
require.Equal(fmt.Sprintf("spiffe://%s.consul", provider.clusterID), parsed.URIs[0].String())
require.NoError(t, err)
require.True(t, parsed.IsCA)
require.Len(t, parsed.URIs, 1)
require.Equal(t, fmt.Sprintf("spiffe://%s.consul", provider.clusterID), parsed.URIs[0].String())
// test that the root cert ttl was applied
if tc.rootCaCreation {
rootCertTTL, err := time.ParseDuration(tc.expectedRootCertTTL)
require.NoError(err)
require.NoError(t, err)
expectedNotAfter := time.Now().Add(rootCertTTL).UTC()
require.WithinDuration(expectedNotAfter, parsed.NotAfter, 10*time.Minute, "expected parsed cert ttl to be the same as the value configured")
require.WithinDuration(t, expectedNotAfter, parsed.NotAfter, 10*time.Minute, "expected parsed cert ttl to be the same as the value configured")
}
}
}
@ -313,7 +309,6 @@ func TestVaultCAProvider_SignLeaf(t *testing.T) {
for _, tc := range KeyTestCases {
tc := tc
t.Run(tc.Desc, func(t *testing.T) {
require := require.New(t)
provider, testVault := testVaultProviderWithConfig(t, true, map[string]interface{}{
"LeafCertTTL": "1h",
"PrivateKeyType": tc.KeyType,
@ -329,11 +324,11 @@ func TestVaultCAProvider_SignLeaf(t *testing.T) {
}
rootPEM, err := provider.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
assertCorrectKeyType(t, tc.KeyType, rootPEM)
intPEM, err := provider.ActiveIntermediate()
require.NoError(err)
require.NoError(t, err)
assertCorrectKeyType(t, tc.KeyType, intPEM)
// Generate a leaf cert for the service.
@ -342,23 +337,23 @@ func TestVaultCAProvider_SignLeaf(t *testing.T) {
raw, _ := connect.TestCSR(t, spiffeService)
csr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
cert, err := provider.Sign(csr)
require.NoError(err)
require.NoError(t, err)
parsed, err := connect.ParseCert(cert)
require.NoError(err)
require.Equal(parsed.URIs[0], spiffeService.URI())
require.NoError(t, err)
require.Equal(t, parsed.URIs[0], spiffeService.URI())
firstSerial = parsed.SerialNumber.Uint64()
// Ensure the cert is valid now and expires within the correct limit.
now := time.Now()
require.True(parsed.NotAfter.Sub(now) < time.Hour)
require.True(parsed.NotBefore.Before(now))
require.True(t, parsed.NotAfter.Sub(now) < time.Hour)
require.True(t, parsed.NotBefore.Before(now))
// Make sure we can validate the cert as expected.
require.NoError(connect.ValidateLeaf(rootPEM, cert, []string{intPEM}))
require.NoError(t, connect.ValidateLeaf(rootPEM, cert, []string{intPEM}))
requireTrailingNewline(t, cert)
}
@ -369,22 +364,22 @@ func TestVaultCAProvider_SignLeaf(t *testing.T) {
raw, _ := connect.TestCSR(t, spiffeService)
csr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
cert, err := provider.Sign(csr)
require.NoError(err)
require.NoError(t, err)
parsed, err := connect.ParseCert(cert)
require.NoError(err)
require.Equal(parsed.URIs[0], spiffeService.URI())
require.NotEqual(firstSerial, parsed.SerialNumber.Uint64())
require.NoError(t, err)
require.Equal(t, parsed.URIs[0], spiffeService.URI())
require.NotEqual(t, firstSerial, parsed.SerialNumber.Uint64())
// Ensure the cert is valid now and expires within the correct limit.
require.True(time.Until(parsed.NotAfter) < time.Hour)
require.True(parsed.NotBefore.Before(time.Now()))
require.True(t, time.Until(parsed.NotAfter) < time.Hour)
require.True(t, parsed.NotBefore.Before(time.Now()))
// Make sure we can validate the cert as expected.
require.NoError(connect.ValidateLeaf(rootPEM, cert, []string{intPEM}))
require.NoError(t, connect.ValidateLeaf(rootPEM, cert, []string{intPEM}))
}
})
}
@ -399,7 +394,6 @@ func TestVaultCAProvider_CrossSignCA(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.Desc, func(t *testing.T) {
require := require.New(t)
if tc.SigningKeyType != tc.CSRKeyType {
// See https://github.com/hashicorp/vault/issues/7709
@ -414,11 +408,11 @@ func TestVaultCAProvider_CrossSignCA(t *testing.T) {
{
rootPEM, err := provider1.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
assertCorrectKeyType(t, tc.SigningKeyType, rootPEM)
intPEM, err := provider1.ActiveIntermediate()
require.NoError(err)
require.NoError(t, err)
assertCorrectKeyType(t, tc.SigningKeyType, intPEM)
}
@ -431,11 +425,11 @@ func TestVaultCAProvider_CrossSignCA(t *testing.T) {
{
rootPEM, err := provider2.ActiveRoot()
require.NoError(err)
require.NoError(t, err)
assertCorrectKeyType(t, tc.CSRKeyType, rootPEM)
intPEM, err := provider2.ActiveIntermediate()
require.NoError(err)
require.NoError(t, err)
assertCorrectKeyType(t, tc.CSRKeyType, intPEM)
}

View File

@ -8,8 +8,9 @@ import (
"crypto/x509"
"encoding/pem"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
)
type KeyConfig struct {
@ -47,32 +48,30 @@ func makeConfig(kc KeyConfig) structs.CommonCAProviderConfig {
}
func testGenerateRSAKey(t *testing.T, bits int) {
r := require.New(t)
_, rsaBlock, err := GeneratePrivateKeyWithConfig("rsa", bits)
r.NoError(err)
r.Contains(rsaBlock, "RSA PRIVATE KEY")
require.NoError(t, err)
require.Contains(t, rsaBlock, "RSA PRIVATE KEY")
rsaBytes, _ := pem.Decode([]byte(rsaBlock))
r.NotNil(rsaBytes)
require.NotNil(t, rsaBytes)
rsaKey, err := x509.ParsePKCS1PrivateKey(rsaBytes.Bytes)
r.NoError(err)
r.NoError(rsaKey.Validate())
r.Equal(bits/8, rsaKey.Size()) // note: returned size is in bytes. 2048/8==256
require.NoError(t, err)
require.NoError(t, rsaKey.Validate())
require.Equal(t, bits/8, rsaKey.Size()) // note: returned size is in bytes. 2048/8==256
}
func testGenerateECDSAKey(t *testing.T, bits int) {
r := require.New(t)
_, pemBlock, err := GeneratePrivateKeyWithConfig("ec", bits)
r.NoError(err)
r.Contains(pemBlock, "EC PRIVATE KEY")
require.NoError(t, err)
require.Contains(t, pemBlock, "EC PRIVATE KEY")
block, _ := pem.Decode([]byte(pemBlock))
r.NotNil(block)
require.NotNil(t, block)
pk, err := x509.ParseECPrivateKey(block.Bytes)
r.NoError(err)
r.Equal(bits, pk.Curve.Params().BitSize)
require.NoError(t, err)
require.Equal(t, bits, pk.Curve.Params().BitSize)
}
// Tests to make sure we are able to generate every type of private key supported by the x509 lib.
@ -104,7 +103,7 @@ func TestValidateGoodConfigs(t *testing.T) {
config := makeConfig(params)
t.Run(fmt.Sprintf("TestValidateGoodConfigs-%s-%d", params.keyType, params.keyBits),
func(t *testing.T) {
require.New(t).NoError(config.Validate(), "unexpected error: type=%s bits=%d",
require.NoError(t, config.Validate(), "unexpected error: type=%s bits=%d",
params.keyType, params.keyBits)
})
@ -117,7 +116,7 @@ func TestValidateBadConfigs(t *testing.T) {
for _, params := range badParams {
config := makeConfig(params)
t.Run(fmt.Sprintf("TestValidateBadConfigs-%s-%d", params.keyType, params.keyBits), func(t *testing.T) {
require.New(t).Error(config.Validate(), "expected error: type=%s bits=%d",
require.Error(t, config.Validate(), "expected error: type=%s bits=%d",
params.keyType, params.keyBits)
})
}
@ -131,7 +130,6 @@ func TestSignatureMismatches(t *testing.T) {
}
t.Parallel()
r := require.New(t)
for _, p1 := range goodParams {
for _, p2 := range goodParams {
if p1 == p2 {
@ -139,14 +137,14 @@ func TestSignatureMismatches(t *testing.T) {
}
t.Run(fmt.Sprintf("TestMismatches-%s%d-%s%d", p1.keyType, p1.keyBits, p2.keyType, p2.keyBits), func(t *testing.T) {
ca := TestCAWithKeyType(t, nil, p1.keyType, p1.keyBits)
r.Equal(p1.keyType, ca.PrivateKeyType)
r.Equal(p1.keyBits, ca.PrivateKeyBits)
require.Equal(t, p1.keyType, ca.PrivateKeyType)
require.Equal(t, p1.keyBits, ca.PrivateKeyBits)
certPEM, keyPEM, err := testLeaf(t, "foobar.service.consul", "default", ca, p2.keyType, p2.keyBits)
r.NoError(err)
require.NoError(t, err)
_, err = ParseCert(certPEM)
r.NoError(err)
require.NoError(t, err)
_, err = ParseSigner(keyPEM)
r.NoError(err)
require.NoError(t, err)
})
}
}

View File

@ -29,20 +29,18 @@ func skipIfMissingOpenSSL(t *testing.T) {
func testCAAndLeaf(t *testing.T, keyType string, keyBits int) {
skipIfMissingOpenSSL(t)
require := require.New(t)
// Create the certs
ca := TestCAWithKeyType(t, nil, keyType, keyBits)
leaf, _ := TestLeaf(t, "web", ca)
// Create a temporary directory for storing the certs
td, err := ioutil.TempDir("", "consul")
require.NoError(err)
require.NoError(t, err)
defer os.RemoveAll(td)
// Write the cert
require.NoError(ioutil.WriteFile(filepath.Join(td, "ca.pem"), []byte(ca.RootCert), 0644))
require.NoError(ioutil.WriteFile(filepath.Join(td, "leaf.pem"), []byte(leaf[:]), 0644))
require.NoError(t, ioutil.WriteFile(filepath.Join(td, "ca.pem"), []byte(ca.RootCert), 0644))
require.NoError(t, ioutil.WriteFile(filepath.Join(td, "leaf.pem"), []byte(leaf[:]), 0644))
// Use OpenSSL to verify so we have an external, known-working process
// that can verify this outside of our own implementations.
@ -54,15 +52,13 @@ func testCAAndLeaf(t *testing.T, keyType string, keyBits int) {
if ee, ok := err.(*exec.ExitError); ok {
t.Log("STDERR:", string(ee.Stderr))
}
require.NoError(err)
require.NoError(t, err)
}
// Test cross-signing.
func testCAAndLeaf_xc(t *testing.T, keyType string, keyBits int) {
skipIfMissingOpenSSL(t)
assert := assert.New(t)
// Create the certs
ca1 := TestCAWithKeyType(t, nil, keyType, keyBits)
ca2 := TestCAWithKeyType(t, ca1, keyType, keyBits)
@ -71,16 +67,16 @@ func testCAAndLeaf_xc(t *testing.T, keyType string, keyBits int) {
// Create a temporary directory for storing the certs
td, err := ioutil.TempDir("", "consul")
assert.Nil(err)
assert.Nil(t, err)
defer os.RemoveAll(td)
// Write the cert
xcbundle := []byte(ca1.RootCert)
xcbundle = append(xcbundle, '\n')
xcbundle = append(xcbundle, []byte(ca2.SigningCert)...)
assert.Nil(ioutil.WriteFile(filepath.Join(td, "ca.pem"), xcbundle, 0644))
assert.Nil(ioutil.WriteFile(filepath.Join(td, "leaf1.pem"), []byte(leaf1), 0644))
assert.Nil(ioutil.WriteFile(filepath.Join(td, "leaf2.pem"), []byte(leaf2), 0644))
assert.Nil(t, ioutil.WriteFile(filepath.Join(td, "ca.pem"), xcbundle, 0644))
assert.Nil(t, ioutil.WriteFile(filepath.Join(td, "leaf1.pem"), []byte(leaf1), 0644))
assert.Nil(t, ioutil.WriteFile(filepath.Join(td, "leaf2.pem"), []byte(leaf2), 0644))
// OpenSSL verify the cross-signed leaf (leaf2)
{
@ -89,7 +85,7 @@ func testCAAndLeaf_xc(t *testing.T, keyType string, keyBits int) {
cmd.Dir = td
output, err := cmd.Output()
t.Log(string(output))
assert.Nil(err)
assert.Nil(t, err)
}
// OpenSSL verify the old leaf (leaf1)
@ -99,7 +95,7 @@ func testCAAndLeaf_xc(t *testing.T, keyType string, keyBits int) {
cmd.Dir = td
output, err := cmd.Output()
t.Log(string(output))
assert.Nil(err)
assert.Nil(t, err)
}
}

View File

@ -43,7 +43,6 @@ func TestConnectCARoots_list(t *testing.T) {
t.Parallel()
assertion := assert.New(t)
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -56,16 +55,16 @@ func TestConnectCARoots_list(t *testing.T) {
req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil)
resp := httptest.NewRecorder()
obj, err := a.srv.ConnectCARoots(resp, req)
assertion.NoError(err)
assert.NoError(t, err)
value := obj.(structs.IndexedCARoots)
assertion.Equal(value.ActiveRootID, ca2.ID)
assertion.Len(value.Roots, 2)
assert.Equal(t, value.ActiveRootID, ca2.ID)
assert.Len(t, value.Roots, 2)
// We should never have the secret information
for _, r := range value.Roots {
assertion.Equal("", r.SigningCert)
assertion.Equal("", r.SigningKey)
assert.Equal(t, "", r.SigningCert)
assert.Equal(t, "", r.SigningKey)
}
}

View File

@ -263,19 +263,19 @@ type ACLResolver struct {
// disabledLock synchronizes access to disabledUntil
disabledLock sync.RWMutex
agentMasterAuthz acl.Authorizer
agentRecoveryAuthz acl.Authorizer
}
func agentMasterAuthorizer(nodeName string, entMeta *structs.EnterpriseMeta, aclConf *acl.Config) (acl.Authorizer, error) {
func agentRecoveryAuthorizer(nodeName string, entMeta *structs.EnterpriseMeta, aclConf *acl.Config) (acl.Authorizer, error) {
var conf acl.Config
if aclConf != nil {
conf = *aclConf
}
setEnterpriseConf(entMeta, &conf)
// Build a policy for the agent master token.
// Build a policy for the agent recovery token.
//
// The builtin agent master policy allows reading any node information
// The builtin agent recovery policy allows reading any node information
// and allows writes to the agent with the node name of the running agent
// only. This used to allow a prefix match on agent names but that seems
// entirely unnecessary so it is now using an exact match.
@ -323,21 +323,21 @@ func NewACLResolver(config *ACLResolverConfig) (*ACLResolver, error) {
return nil, fmt.Errorf("invalid ACL down policy %q", config.Config.ACLDownPolicy)
}
authz, err := agentMasterAuthorizer(config.Config.NodeName, &config.Config.EnterpriseMeta, config.ACLConfig)
authz, err := agentRecoveryAuthorizer(config.Config.NodeName, &config.Config.EnterpriseMeta, config.ACLConfig)
if err != nil {
return nil, fmt.Errorf("failed to initialize the agent master authorizer")
return nil, fmt.Errorf("failed to initialize the agent recovery authorizer")
}
return &ACLResolver{
config: config.Config,
logger: config.Logger.Named(logging.ACL),
delegate: config.Delegate,
aclConf: config.ACLConfig,
cache: cache,
disableDuration: config.DisableDuration,
down: down,
tokens: config.Tokens,
agentMasterAuthz: authz,
config: config.Config,
logger: config.Logger.Named(logging.ACL),
delegate: config.Delegate,
aclConf: config.ACLConfig,
cache: cache,
disableDuration: config.DisableDuration,
down: down,
tokens: config.Tokens,
agentRecoveryAuthz: authz,
}, nil
}
@ -1049,7 +1049,7 @@ func (r *ACLResolver) resolveLocallyManagedToken(token string) (structs.ACLIdent
}
if r.tokens.IsAgentRecoveryToken(token) {
return structs.NewAgentMasterTokenIdentity(r.config.NodeName, token), r.agentMasterAuthz, true
return structs.NewAgentRecoveryTokenIdentity(r.config.NodeName, token), r.agentRecoveryAuthz, true
}
return r.resolveLocallyManagedEnterpriseToken(token)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -58,7 +58,7 @@ func testACLTokenReap_Primary(t *testing.T, local, global bool) {
acl := ACL{srv: s1}
masterTokenAccessorID, err := retrieveTestTokenAccessorForSecret(codec, "root", "dc1", "root")
initialManagementTokenAccessorID, err := retrieveTestTokenAccessorForSecret(codec, "root", "dc1", "root")
require.NoError(t, err)
listTokens := func() (localTokens, globalTokens []string, err error) {
@ -88,9 +88,9 @@ func testACLTokenReap_Primary(t *testing.T, local, global bool) {
t.Helper()
var expectLocal, expectGlobal []string
// The master token and the anonymous token are always going to be
// present and global.
expectGlobal = append(expectGlobal, masterTokenAccessorID)
// The initial management token and the anonymous token are always
// going to be present and global.
expectGlobal = append(expectGlobal, initialManagementTokenAccessorID)
expectGlobal = append(expectGlobal, structs.ACLTokenAnonymousID)
if local {

View File

@ -41,7 +41,7 @@ func TestAutoConfigBackend_CreateACLToken(t *testing.T) {
waitForLeaderEstablishment(t, srv)
r1, err := upsertTestRole(codec, TestDefaultMasterToken, "dc1")
r1, err := upsertTestRole(codec, TestDefaultInitialManagementToken, "dc1")
require.NoError(t, err)
t.Run("predefined-ids", func(t *testing.T) {

View File

@ -6,12 +6,13 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
)
func TestAutopilot_IdempotentShutdown(t *testing.T) {
@ -19,7 +20,7 @@ func TestAutopilot_IdempotentShutdown(t *testing.T) {
t.Skip("too slow for testing.Short")
}
dir1, s1 := testServerWithConfig(t, nil)
dir1, s1 := testServerWithConfig(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
retry.Run(t, func(r *retry.R) { r.Check(waitForLeader(s1)) })
@ -76,7 +77,6 @@ func TestAutopilot_CleanupDeadServer(t *testing.T) {
retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 5)) })
}
require := require.New(t)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
leaderIndex := -1
for i, s := range servers {
@ -85,7 +85,7 @@ func TestAutopilot_CleanupDeadServer(t *testing.T) {
break
}
}
require.NotEqual(leaderIndex, -1)
require.NotEqual(t, leaderIndex, -1)
// Shutdown two non-leader servers
killed := make(map[string]struct{})

View File

@ -309,12 +309,12 @@ func vetRegisterWithACL(
// Service-level check for some other service. Make sure they've
// got write permissions for that service.
if ns == nil {
return fmt.Errorf("Unknown service '%s' for check '%s'", check.ServiceID, check.CheckID)
return fmt.Errorf("Unknown service ID '%s' for check ID '%s'", check.ServiceID, check.CheckID)
}
other, ok := ns.Services[check.ServiceID]
if !ok {
return fmt.Errorf("Unknown service '%s' for check '%s'", check.ServiceID, check.CheckID)
return fmt.Errorf("Unknown service ID '%s' for check ID '%s'", check.ServiceID, check.CheckID)
}
// We are only adding a check here, so we don't add the scope,
@ -417,7 +417,7 @@ func vetDeregisterWithACL(
// ignore them from an ACL perspective.
if subj.ServiceID != "" {
if ns == nil {
return fmt.Errorf("Unknown service '%s'", subj.ServiceID)
return fmt.Errorf("Unknown service ID '%s'", subj.ServiceID)
}
ns.FillAuthzContext(&authzContext)
@ -427,7 +427,7 @@ func vetDeregisterWithACL(
}
} else if subj.CheckID != "" {
if nc == nil {
return fmt.Errorf("Unknown check '%s'", subj.CheckID)
return fmt.Errorf("Unknown check ID '%s'", subj.CheckID)
}
nc.FillAuthzContext(&authzContext)

View File

@ -388,7 +388,6 @@ func TestCatalog_Register_ConnectProxy(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -399,7 +398,7 @@ func TestCatalog_Register_ConnectProxy(t *testing.T) {
// Register
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// List
req := structs.ServiceSpecificRequest{
@ -407,11 +406,11 @@ func TestCatalog_Register_ConnectProxy(t *testing.T) {
ServiceName: args.Service.Service,
}
var resp structs.IndexedServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(t, resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
assert.Equal(t, structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(t, args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
}
// Test an invalid ConnectProxy. We don't need to exhaustively test because
@ -423,7 +422,6 @@ func TestCatalog_Register_ConnectProxy_invalid(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -436,8 +434,8 @@ func TestCatalog_Register_ConnectProxy_invalid(t *testing.T) {
// Register
var out struct{}
err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)
assert.NotNil(err)
assert.Contains(err.Error(), "DestinationServiceName")
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "DestinationServiceName")
}
// Test that write is required for the proxy destination to register a proxy.
@ -448,7 +446,6 @@ func TestCatalog_Register_ConnectProxy_ACLDestinationServiceName(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -479,7 +476,7 @@ node "foo" {
args.WriteRequest.Token = token
var out struct{}
err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)
assert.True(acl.IsErrPermissionDenied(err))
assert.True(t, acl.IsErrPermissionDenied(err))
// Register should fail with the right destination but wrong name
args = structs.TestRegisterRequestProxy(t)
@ -487,14 +484,14 @@ node "foo" {
args.Service.Proxy.DestinationServiceName = "foo"
args.WriteRequest.Token = token
err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)
assert.True(acl.IsErrPermissionDenied(err))
assert.True(t, acl.IsErrPermissionDenied(err))
// Register should work with the right destination
args = structs.TestRegisterRequestProxy(t)
args.Service.Service = "foo"
args.Service.Proxy.DestinationServiceName = "foo"
args.WriteRequest.Token = token
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
}
func TestCatalog_Register_ConnectNative(t *testing.T) {
@ -504,7 +501,6 @@ func TestCatalog_Register_ConnectNative(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -516,7 +512,7 @@ func TestCatalog_Register_ConnectNative(t *testing.T) {
// Register
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// List
req := structs.ServiceSpecificRequest{
@ -524,11 +520,11 @@ func TestCatalog_Register_ConnectNative(t *testing.T) {
ServiceName: args.Service.Service,
}
var resp structs.IndexedServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(t, resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
assert.Equal(structs.ServiceKindTypical, v.ServiceKind)
assert.True(v.ServiceConnect.Native)
assert.Equal(t, structs.ServiceKindTypical, v.ServiceKind)
assert.True(t, v.ServiceConnect.Native)
}
func TestCatalog_Deregister(t *testing.T) {
@ -2149,7 +2145,6 @@ func TestCatalog_ListServiceNodes_ConnectProxy(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2161,7 +2156,7 @@ func TestCatalog_ListServiceNodes_ConnectProxy(t *testing.T) {
// Register the service
args := structs.TestRegisterRequestProxy(t)
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
// List
req := structs.ServiceSpecificRequest{
@ -2170,11 +2165,11 @@ func TestCatalog_ListServiceNodes_ConnectProxy(t *testing.T) {
TagFilter: false,
}
var resp structs.IndexedServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(t, resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
assert.Equal(t, structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(t, args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
}
func TestCatalog_ServiceNodes_Gateway(t *testing.T) {
@ -2304,7 +2299,6 @@ func TestCatalog_ListServiceNodes_ConnectDestination(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2316,7 +2310,7 @@ func TestCatalog_ListServiceNodes_ConnectDestination(t *testing.T) {
// Register the proxy service
args := structs.TestRegisterRequestProxy(t)
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
// Register the service
{
@ -2324,7 +2318,7 @@ func TestCatalog_ListServiceNodes_ConnectDestination(t *testing.T) {
args := structs.TestRegisterRequest(t)
args.Service.Service = dst
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
}
// List
@ -2334,22 +2328,22 @@ func TestCatalog_ListServiceNodes_ConnectDestination(t *testing.T) {
ServiceName: args.Service.Proxy.DestinationServiceName,
}
var resp structs.IndexedServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(t, resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
assert.Equal(t, structs.ServiceKindConnectProxy, v.ServiceKind)
assert.Equal(t, args.Service.Proxy.DestinationServiceName, v.ServiceProxy.DestinationServiceName)
// List by non-Connect
req = structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: args.Service.Proxy.DestinationServiceName,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(t, resp.ServiceNodes, 1)
v = resp.ServiceNodes[0]
assert.Equal(args.Service.Proxy.DestinationServiceName, v.ServiceName)
assert.Equal("", v.ServiceProxy.DestinationServiceName)
assert.Equal(t, args.Service.Proxy.DestinationServiceName, v.ServiceName)
assert.Equal(t, "", v.ServiceProxy.DestinationServiceName)
}
// Test that calling ServiceNodes with Connect: true will return
@ -2361,7 +2355,6 @@ func TestCatalog_ListServiceNodes_ConnectDestinationNative(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2374,7 +2367,7 @@ func TestCatalog_ListServiceNodes_ConnectDestinationNative(t *testing.T) {
args := structs.TestRegisterRequest(t)
args.Service.Connect.Native = true
var out struct{}
require.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
// List
req := structs.ServiceSpecificRequest{
@ -2383,20 +2376,20 @@ func TestCatalog_ListServiceNodes_ConnectDestinationNative(t *testing.T) {
ServiceName: args.Service.Service,
}
var resp structs.IndexedServiceNodes
require.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
require.Len(resp.ServiceNodes, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
require.Len(t, resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
require.Equal(args.Service.Service, v.ServiceName)
require.Equal(t, args.Service.Service, v.ServiceName)
// List by non-Connect
req = structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: args.Service.Service,
}
require.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
require.Len(resp.ServiceNodes, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
require.Len(t, resp.ServiceNodes, 1)
v = resp.ServiceNodes[0]
require.Equal(args.Service.Service, v.ServiceName)
require.Equal(t, args.Service.Service, v.ServiceName)
}
func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) {
@ -2491,7 +2484,6 @@ func TestCatalog_ListServiceNodes_ConnectNative(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2504,7 +2496,7 @@ func TestCatalog_ListServiceNodes_ConnectNative(t *testing.T) {
args := structs.TestRegisterRequest(t)
args.Service.Connect.Native = true
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
// List
req := structs.ServiceSpecificRequest{
@ -2513,10 +2505,10 @@ func TestCatalog_ListServiceNodes_ConnectNative(t *testing.T) {
TagFilter: false,
}
var resp structs.IndexedServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(resp.ServiceNodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp))
assert.Len(t, resp.ServiceNodes, 1)
v := resp.ServiceNodes[0]
assert.Equal(args.Service.Connect.Native, v.ServiceConnect.Native)
assert.Equal(t, args.Service.Connect.Native, v.ServiceConnect.Native)
}
func TestCatalog_NodeServices(t *testing.T) {
@ -2581,7 +2573,6 @@ func TestCatalog_NodeServices_ConnectProxy(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2593,7 +2584,7 @@ func TestCatalog_NodeServices_ConnectProxy(t *testing.T) {
// Register the service
args := structs.TestRegisterRequestProxy(t)
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
// List
req := structs.NodeSpecificRequest{
@ -2601,12 +2592,12 @@ func TestCatalog_NodeServices_ConnectProxy(t *testing.T) {
Node: args.Node,
}
var resp structs.IndexedNodeServices
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &resp))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &resp))
assert.Len(resp.NodeServices.Services, 1)
assert.Len(t, resp.NodeServices.Services, 1)
v := resp.NodeServices.Services[args.Service.Service]
assert.Equal(structs.ServiceKindConnectProxy, v.Kind)
assert.Equal(args.Service.Proxy.DestinationServiceName, v.Proxy.DestinationServiceName)
assert.Equal(t, structs.ServiceKindConnectProxy, v.Kind)
assert.Equal(t, args.Service.Proxy.DestinationServiceName, v.Proxy.DestinationServiceName)
}
func TestCatalog_NodeServices_ConnectNative(t *testing.T) {
@ -2616,7 +2607,6 @@ func TestCatalog_NodeServices_ConnectNative(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2628,7 +2618,7 @@ func TestCatalog_NodeServices_ConnectNative(t *testing.T) {
// Register the service
args := structs.TestRegisterRequest(t)
var out struct{}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out))
// List
req := structs.NodeSpecificRequest{
@ -2636,11 +2626,11 @@ func TestCatalog_NodeServices_ConnectNative(t *testing.T) {
Node: args.Node,
}
var resp structs.IndexedNodeServices
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &resp))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &resp))
assert.Len(resp.NodeServices.Services, 1)
assert.Len(t, resp.NodeServices.Services, 1)
v := resp.NodeServices.Services[args.Service.Service]
assert.Equal(args.Service.Connect.Native, v.Connect.Native)
assert.Equal(t, args.Service.Connect.Native, v.Connect.Native)
}
// Used to check for a regression against a known bug
@ -2883,27 +2873,25 @@ func TestCatalog_NodeServices_ACL(t *testing.T) {
}
t.Run("deny", func(t *testing.T) {
require := require.New(t)
args.Token = token("deny")
var reply structs.IndexedNodeServices
err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply)
require.NoError(err)
require.Nil(reply.NodeServices)
require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Nil(t, reply.NodeServices)
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
})
t.Run("allow", func(t *testing.T) {
require := require.New(t)
args.Token = token("read")
var reply structs.IndexedNodeServices
err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply)
require.NoError(err)
require.NotNil(reply.NodeServices)
require.False(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.NoError(t, err)
require.NotNil(t, reply.NodeServices)
require.False(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
})
}

View File

@ -347,7 +347,7 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
// Let the caller peek at the reply.
if replyFn != nil {
if err := replyFn(&reply); err != nil {
return nil
return err
}
}
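The one-line change in this hunk fixes a swallowed error: the value returned by the caller's replyFn used to be discarded in favor of nil. Below is a minimal, self-contained sketch of the corrected control flow; the type and function names are stand-ins, not Consul's actual ones.

```go
package main

import (
    "errors"
    "fmt"
)

// snapshotReply is a stand-in for the real RPC reply type; illustrative only.
type snapshotReply struct{}

// peekReply mirrors the fixed behavior: an error returned by the caller's
// callback is now propagated instead of being silently replaced with nil.
func peekReply(reply *snapshotReply, replyFn func(*snapshotReply) error) error {
    if replyFn != nil {
        if err := replyFn(reply); err != nil {
            return err // before the fix, this path returned nil
        }
    }
    return nil
}

func main() {
    err := peekReply(&snapshotReply{}, func(*snapshotReply) error {
        return errors.New("unexpected snapshot header")
    })
    fmt.Println(err) // the callback's error now reaches the caller
}
```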

View File

@ -150,8 +150,6 @@ func TestConfigEntry_Apply_ACLDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -191,16 +189,16 @@ operator = "write"
Name: "foo",
}
err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &args, &out)
require.NoError(err)
require.NoError(t, err)
state := s1.fsm.State()
_, entry, err := state.ConfigEntry(nil, structs.ServiceDefaults, "foo", nil)
require.NoError(err)
require.NoError(t, err)
serviceConf, ok := entry.(*structs.ServiceConfigEntry)
require.True(ok)
require.Equal("foo", serviceConf.Name)
require.Equal(structs.ServiceDefaults, serviceConf.Kind)
require.True(t, ok)
require.Equal(t, "foo", serviceConf.Name)
require.Equal(t, structs.ServiceDefaults, serviceConf.Kind)
// Try to update the global proxy args with the anonymous token - this should fail.
proxyArgs := structs.ConfigEntryRequest{
@ -219,7 +217,7 @@ operator = "write"
// Now with the privileged token.
proxyArgs.WriteRequest.Token = id
err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &proxyArgs, &out)
require.NoError(err)
require.NoError(t, err)
}
func TestConfigEntry_Get(t *testing.T) {
@ -229,8 +227,6 @@ func TestConfigEntry_Get(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -243,7 +239,7 @@ func TestConfigEntry_Get(t *testing.T) {
Name: "foo",
}
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, entry))
require.NoError(t, state.EnsureConfigEntry(1, entry))
args := structs.ConfigEntryQuery{
Kind: structs.ServiceDefaults,
@ -251,12 +247,12 @@ func TestConfigEntry_Get(t *testing.T) {
Datacenter: s1.config.Datacenter,
}
var out structs.ConfigEntryResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.Get", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Get", &args, &out))
serviceConf, ok := out.Entry.(*structs.ServiceConfigEntry)
require.True(ok)
require.Equal("foo", serviceConf.Name)
require.Equal(structs.ServiceDefaults, serviceConf.Kind)
require.True(t, ok)
require.Equal(t, "foo", serviceConf.Name)
require.Equal(t, structs.ServiceDefaults, serviceConf.Kind)
}
func TestConfigEntry_Get_ACLDeny(t *testing.T) {
@ -266,8 +262,6 @@ func TestConfigEntry_Get_ACLDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -290,11 +284,11 @@ operator = "read"
// Create some dummy service/proxy configs to be looked up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}))
@ -314,12 +308,12 @@ operator = "read"
// The "foo" service should work.
args.Name = "foo"
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.Get", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Get", &args, &out))
serviceConf, ok := out.Entry.(*structs.ServiceConfigEntry)
require.True(ok)
require.Equal("foo", serviceConf.Name)
require.Equal(structs.ServiceDefaults, serviceConf.Kind)
require.True(t, ok)
require.Equal(t, "foo", serviceConf.Name)
require.Equal(t, structs.ServiceDefaults, serviceConf.Kind)
}
func TestConfigEntry_List(t *testing.T) {
@ -329,8 +323,6 @@ func TestConfigEntry_List(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -351,19 +343,19 @@ func TestConfigEntry_List(t *testing.T) {
},
},
}
require.NoError(state.EnsureConfigEntry(1, expected.Entries[0]))
require.NoError(state.EnsureConfigEntry(2, expected.Entries[1]))
require.NoError(t, state.EnsureConfigEntry(1, expected.Entries[0]))
require.NoError(t, state.EnsureConfigEntry(2, expected.Entries[1]))
args := structs.ConfigEntryQuery{
Kind: structs.ServiceDefaults,
Datacenter: "dc1",
}
var out structs.IndexedConfigEntries
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &out))
expected.Kind = structs.ServiceDefaults
expected.QueryMeta = out.QueryMeta
require.Equal(expected, out)
require.Equal(t, expected, out)
}
func TestConfigEntry_ListAll(t *testing.T) {
@ -466,8 +458,6 @@ func TestConfigEntry_List_ACLDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -490,15 +480,15 @@ operator = "read"
// Create some dummy service/proxy configs to be looked up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}))
require.NoError(state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "db",
}))
@ -511,26 +501,26 @@ operator = "read"
}
var out structs.IndexedConfigEntries
err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &out)
require.NoError(err)
require.NoError(t, err)
serviceConf, ok := out.Entries[0].(*structs.ServiceConfigEntry)
require.Len(out.Entries, 1)
require.True(ok)
require.Equal("foo", serviceConf.Name)
require.Equal(structs.ServiceDefaults, serviceConf.Kind)
require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.Len(t, out.Entries, 1)
require.True(t, ok)
require.Equal(t, "foo", serviceConf.Name)
require.Equal(t, structs.ServiceDefaults, serviceConf.Kind)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// Get the global proxy config.
args.Kind = structs.ProxyDefaults
err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &out)
require.NoError(err)
require.NoError(t, err)
proxyConf, ok := out.Entries[0].(*structs.ProxyConfigEntry)
require.Len(out.Entries, 1)
require.True(ok)
require.Equal(structs.ProxyConfigGlobal, proxyConf.Name)
require.Equal(structs.ProxyDefaults, proxyConf.Kind)
require.False(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.Len(t, out.Entries, 1)
require.True(t, ok)
require.Equal(t, structs.ProxyConfigGlobal, proxyConf.Name)
require.Equal(t, structs.ProxyDefaults, proxyConf.Kind)
require.False(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
}
func TestConfigEntry_ListAll_ACLDeny(t *testing.T) {
@ -540,8 +530,6 @@ func TestConfigEntry_ListAll_ACLDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -564,15 +552,15 @@ operator = "read"
// Create some dummy service/proxy configs to be looked up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}))
require.NoError(state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "db",
}))
@ -585,8 +573,8 @@ operator = "read"
}
var out structs.IndexedGenericConfigEntries
err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.ListAll", &args, &out)
require.NoError(err)
require.Len(out.Entries, 2)
require.NoError(t, err)
require.Len(t, out.Entries, 2)
svcIndex := 0
proxyIndex := 1
if out.Entries[0].GetKind() == structs.ProxyDefaults {
@ -595,15 +583,15 @@ operator = "read"
}
svcConf, ok := out.Entries[svcIndex].(*structs.ServiceConfigEntry)
require.True(ok)
require.True(t, ok)
proxyConf, ok := out.Entries[proxyIndex].(*structs.ProxyConfigEntry)
require.True(ok)
require.True(t, ok)
require.Equal("foo", svcConf.Name)
require.Equal(structs.ServiceDefaults, svcConf.Kind)
require.Equal(structs.ProxyConfigGlobal, proxyConf.Name)
require.Equal(structs.ProxyDefaults, proxyConf.Kind)
require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.Equal(t, "foo", svcConf.Name)
require.Equal(t, structs.ServiceDefaults, svcConf.Kind)
require.Equal(t, structs.ProxyConfigGlobal, proxyConf.Name)
require.Equal(t, structs.ProxyDefaults, proxyConf.Kind)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
}
func TestConfigEntry_Delete(t *testing.T) {
@ -686,8 +674,6 @@ func TestConfigEntry_DeleteCAS(t *testing.T) {
}
t.Parallel()
require := require.New(t)
dir, s := testServer(t)
defer os.RemoveAll(dir)
defer s.Shutdown()
@ -703,11 +689,11 @@ func TestConfigEntry_DeleteCAS(t *testing.T) {
Name: "foo",
}
state := s.fsm.State()
require.NoError(state.EnsureConfigEntry(1, entry))
require.NoError(t, state.EnsureConfigEntry(1, entry))
// Verify it's there.
_, existing, err := state.ConfigEntry(nil, entry.Kind, entry.Name, nil)
require.NoError(err)
require.NoError(t, err)
// Send a delete CAS request with an invalid index.
args := structs.ConfigEntryRequest{
@ -718,24 +704,24 @@ func TestConfigEntry_DeleteCAS(t *testing.T) {
args.Entry.GetRaftIndex().ModifyIndex = existing.GetRaftIndex().ModifyIndex - 1
var rsp structs.ConfigEntryDeleteResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &rsp))
require.False(rsp.Deleted)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &rsp))
require.False(t, rsp.Deleted)
// Verify the entry was not deleted.
_, existing, err = s.fsm.State().ConfigEntry(nil, structs.ServiceDefaults, "foo", nil)
require.NoError(err)
require.NotNil(existing)
require.NoError(t, err)
require.NotNil(t, existing)
// Restore the valid index and try again.
args.Entry.GetRaftIndex().ModifyIndex = existing.GetRaftIndex().ModifyIndex
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &rsp))
require.True(rsp.Deleted)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &rsp))
require.True(t, rsp.Deleted)
// Verify the entry was deleted.
_, existing, err = s.fsm.State().ConfigEntry(nil, structs.ServiceDefaults, "foo", nil)
require.NoError(err)
require.Nil(existing)
require.NoError(t, err)
require.Nil(t, existing)
}
func TestConfigEntry_Delete_ACLDeny(t *testing.T) {
@ -745,8 +731,6 @@ func TestConfigEntry_Delete_ACLDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -769,11 +753,11 @@ operator = "write"
// Create some dummy service/proxy configs to be looked up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}))
@ -796,12 +780,12 @@ operator = "write"
args.Entry = &structs.ServiceConfigEntry{
Name: "foo",
}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &out))
// Verify the entry was deleted.
_, existing, err := state.ConfigEntry(nil, structs.ServiceDefaults, "foo", nil)
require.NoError(err)
require.Nil(existing)
require.NoError(t, err)
require.Nil(t, existing)
// Try to delete the global proxy config without a token.
args = structs.ConfigEntryRequest{
@ -817,11 +801,11 @@ operator = "write"
// Now delete with a valid token.
args.WriteRequest.Token = id
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Delete", &args, &out))
_, existing, err = state.ConfigEntry(nil, structs.ServiceDefaults, "foo", nil)
require.NoError(err)
require.Nil(existing)
require.NoError(t, err)
require.Nil(t, existing)
}
func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
@ -831,8 +815,6 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -841,19 +823,19 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
// Create a dummy proxy/service config in the state store to look up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
"foo": 1,
},
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
Protocol: "http",
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "bar",
Protocol: "grpc",
@ -865,7 +847,7 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
Upstreams: []string{"bar", "baz"},
}
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -880,14 +862,14 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
_, entry, err := s1.fsm.State().ConfigEntry(nil, structs.ProxyDefaults, structs.ProxyConfigGlobal, nil)
require.NoError(err)
require.NotNil(entry)
require.NoError(t, err)
require.NotNil(t, entry)
proxyConf, ok := entry.(*structs.ProxyConfigEntry)
require.True(ok)
require.Equal(map[string]interface{}{"foo": 1}, proxyConf.Config)
require.True(t, ok)
require.Equal(t, map[string]interface{}{"foo": 1}, proxyConf.Config)
}
func TestConfigEntry_ResolveServiceConfig_TransparentProxy(t *testing.T) {
@ -1426,8 +1408,6 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1443,19 +1423,19 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
// TestConfigEntry_ResolveServiceConfig_Upstreams_Blocking
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
"global": 1,
},
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
Protocol: "grpc",
}))
require.NoError(state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "bar",
Protocol: "http",
@ -1465,7 +1445,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
{ // Verify that we get the results of proxy-defaults and service-defaults for 'foo'.
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "foo",
Datacenter: "dc1",
@ -1480,7 +1460,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
},
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
index = out.Index
}
@ -1490,7 +1470,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
require.NoError(state.DeleteConfigEntry(index+1,
require.NoError(t, state.DeleteConfigEntry(index+1,
structs.ServiceDefaults,
"foo",
nil,
@ -1499,7 +1479,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
// Re-run the query
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "foo",
Datacenter: "dc1",
@ -1512,10 +1492,10 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
))
// Should block at least 100ms
require.True(time.Since(start) >= 100*time.Millisecond, "too fast")
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
// Check the indexes
require.Equal(out.Index, index+1)
require.Equal(t, out.Index, index+1)
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -1523,14 +1503,14 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
},
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
index = out.Index
}
{ // Verify that we get the results of proxy-defaults and service-defaults for 'bar'.
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "bar",
Datacenter: "dc1",
@ -1545,7 +1525,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
},
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
index = out.Index
}
@ -1555,7 +1535,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
require.NoError(state.DeleteConfigEntry(index+1,
require.NoError(t, state.DeleteConfigEntry(index+1,
structs.ProxyDefaults,
structs.ProxyConfigGlobal,
nil,
@ -1564,7 +1544,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
// Re-run the query
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "bar",
Datacenter: "dc1",
@ -1577,10 +1557,10 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
))
// Should block at least 100ms
require.True(time.Since(start) >= 100*time.Millisecond, "too fast")
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
// Check the indexes
require.Equal(out.Index, index+1)
require.Equal(t, out.Index, index+1)
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -1588,7 +1568,7 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
},
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
}
}
@ -1798,8 +1778,6 @@ func TestConfigEntry_ResolveServiceConfig_UpstreamProxyDefaultsProtocol(t *testi
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1808,26 +1786,26 @@ func TestConfigEntry_ResolveServiceConfig_UpstreamProxyDefaultsProtocol(t *testi
// Create a dummy proxy/service config in the state store to look up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
"protocol": "http",
},
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "bar",
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "other",
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "alreadyprotocol",
Protocol: "grpc",
@ -1839,7 +1817,7 @@ func TestConfigEntry_ResolveServiceConfig_UpstreamProxyDefaultsProtocol(t *testi
Upstreams: []string{"bar", "other", "alreadyprotocol", "dne"},
}
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -1862,7 +1840,7 @@ func TestConfigEntry_ResolveServiceConfig_UpstreamProxyDefaultsProtocol(t *testi
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
}
func TestConfigEntry_ResolveServiceConfig_ProxyDefaultsProtocol_UsedForAllUpstreams(t *testing.T) {
@ -1872,8 +1850,6 @@ func TestConfigEntry_ResolveServiceConfig_ProxyDefaultsProtocol_UsedForAllUpstre
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1882,7 +1858,7 @@ func TestConfigEntry_ResolveServiceConfig_ProxyDefaultsProtocol_UsedForAllUpstre
// Create a dummy proxy/service config in the state store to look up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
@ -1896,7 +1872,7 @@ func TestConfigEntry_ResolveServiceConfig_ProxyDefaultsProtocol_UsedForAllUpstre
Upstreams: []string{"bar"},
}
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -1910,7 +1886,7 @@ func TestConfigEntry_ResolveServiceConfig_ProxyDefaultsProtocol_UsedForAllUpstre
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
}
func TestConfigEntry_ResolveServiceConfigNoConfig(t *testing.T) {
@ -1920,8 +1896,6 @@ func TestConfigEntry_ResolveServiceConfigNoConfig(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1936,7 +1910,7 @@ func TestConfigEntry_ResolveServiceConfigNoConfig(t *testing.T) {
Upstreams: []string{"bar", "baz"},
}
var out structs.ServiceConfigResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
expected := structs.ServiceConfigResponse{
ProxyConfig: nil,
@ -1944,7 +1918,7 @@ func TestConfigEntry_ResolveServiceConfigNoConfig(t *testing.T) {
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(expected, out)
require.Equal(t, expected, out)
}
func TestConfigEntry_ResolveServiceConfig_ACLDeny(t *testing.T) {
@ -1954,8 +1928,6 @@ func TestConfigEntry_ResolveServiceConfig_ACLDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -1978,15 +1950,15 @@ operator = "write"
// Create some dummy service/proxy configs to be looked up.
state := s1.fsm.State()
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
}))
require.NoError(state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}))
require.NoError(state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "db",
}))
@ -2005,7 +1977,7 @@ operator = "write"
// The "foo" service should work.
args.Name = "foo"
require.NoError(msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
}

View File

@ -38,8 +38,6 @@ func TestConnectCARoots(t *testing.T) {
t.Parallel()
assert := assert.New(t)
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -54,29 +52,29 @@ func TestConnectCARoots(t *testing.T) {
ca2 := connect.TestCA(t, nil)
ca2.Active = false
idx, _, err := state.CARoots(nil)
require.NoError(err)
require.NoError(t, err)
ok, err := state.CARootSetCAS(idx, idx, []*structs.CARoot{ca1, ca2})
assert.True(ok)
require.NoError(err)
assert.True(t, ok)
require.NoError(t, err)
_, caCfg, err := state.CAConfig(nil)
require.NoError(err)
require.NoError(t, err)
// Request
args := &structs.DCSpecificRequest{
Datacenter: "dc1",
}
var reply structs.IndexedCARoots
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply))
// Verify
assert.Equal(ca1.ID, reply.ActiveRootID)
assert.Len(reply.Roots, 2)
assert.Equal(t, ca1.ID, reply.ActiveRootID)
assert.Len(t, reply.Roots, 2)
for _, r := range reply.Roots {
// These must never be set, for security
assert.Equal("", r.SigningCert)
assert.Equal("", r.SigningKey)
assert.Equal(t, "", r.SigningCert)
assert.Equal(t, "", r.SigningKey)
}
assert.Equal(fmt.Sprintf("%s.consul", caCfg.ClusterID), reply.TrustDomain)
assert.Equal(t, fmt.Sprintf("%s.consul", caCfg.ClusterID), reply.TrustDomain)
}
func TestConnectCAConfig_GetSet(t *testing.T) {
@ -86,7 +84,6 @@ func TestConnectCAConfig_GetSet(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -101,14 +98,14 @@ func TestConnectCAConfig_GetSet(t *testing.T) {
Datacenter: "dc1",
}
var reply structs.CAConfiguration
assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
assert.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
actual, err := ca.ParseConsulCAConfig(reply.Config)
assert.NoError(err)
assert.NoError(t, err)
expected, err := ca.ParseConsulCAConfig(s1.config.CAConfig.Config)
assert.NoError(err)
assert.Equal(reply.Provider, s1.config.CAConfig.Provider)
assert.Equal(actual, expected)
assert.NoError(t, err)
assert.Equal(t, reply.Provider, s1.config.CAConfig.Provider)
assert.Equal(t, actual, expected)
}
testState := map[string]string{"foo": "bar"}
@ -141,15 +138,15 @@ func TestConnectCAConfig_GetSet(t *testing.T) {
Datacenter: "dc1",
}
var reply structs.CAConfiguration
assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
assert.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
actual, err := ca.ParseConsulCAConfig(reply.Config)
assert.NoError(err)
assert.NoError(t, err)
expected, err := ca.ParseConsulCAConfig(newConfig.Config)
assert.NoError(err)
assert.Equal(reply.Provider, newConfig.Provider)
assert.Equal(actual, expected)
assert.Equal(testState, reply.State)
assert.NoError(t, err)
assert.Equal(t, reply.Provider, newConfig.Provider)
assert.Equal(t, actual, expected)
assert.Equal(t, testState, reply.State)
}
}
@ -163,7 +160,7 @@ func TestConnectCAConfig_GetSet_ACLDeny(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = TestDefaultMasterToken
c.ACLInitialManagementToken = TestDefaultInitialManagementToken
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
@ -175,11 +172,11 @@ func TestConnectCAConfig_GetSet_ACLDeny(t *testing.T) {
testrpc.WaitForLeader(t, s1.RPC, "dc1")
opReadToken, err := upsertTestTokenWithPolicyRules(
codec, TestDefaultMasterToken, "dc1", `operator = "read"`)
codec, TestDefaultInitialManagementToken, "dc1", `operator = "read"`)
require.NoError(t, err)
opWriteToken, err := upsertTestTokenWithPolicyRules(
codec, TestDefaultMasterToken, "dc1", `operator = "write"`)
codec, TestDefaultInitialManagementToken, "dc1", `operator = "write"`)
require.NoError(t, err)
// Update a config value
@ -215,7 +212,7 @@ pY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=
args := &structs.CARequest{
Datacenter: "dc1",
Config: newConfig,
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
}
var reply interface{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
@ -254,7 +251,6 @@ func TestConnectCAConfig_GetSetForceNoCrossSigning(t *testing.T) {
t.Parallel()
require := require.New(t)
// Set up a server with a built-in CA that has artificially disabled cross
// signing. This is simpler than running tests with external CA dependencies.
dir1, s1 := testServerWithConfig(t, func(c *Config) {
@ -272,8 +268,8 @@ func TestConnectCAConfig_GetSetForceNoCrossSigning(t *testing.T) {
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(t, rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Get the starting config
@ -282,20 +278,20 @@ func TestConnectCAConfig_GetSetForceNoCrossSigning(t *testing.T) {
Datacenter: "dc1",
}
var reply structs.CAConfiguration
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
actual, err := ca.ParseConsulCAConfig(reply.Config)
require.NoError(err)
require.NoError(t, err)
expected, err := ca.ParseConsulCAConfig(s1.config.CAConfig.Config)
require.NoError(err)
require.Equal(reply.Provider, s1.config.CAConfig.Provider)
require.Equal(actual, expected)
require.NoError(t, err)
require.Equal(t, reply.Provider, s1.config.CAConfig.Provider)
require.Equal(t, actual, expected)
}
// Update to a new CA with different key. This should fail since the existing
// CA doesn't support cross signing so can't rotate safely.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
require.NoError(t, err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
@ -309,7 +305,7 @@ func TestConnectCAConfig_GetSetForceNoCrossSigning(t *testing.T) {
}
var reply interface{}
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply)
require.EqualError(err, "The current CA Provider does not support cross-signing. "+
require.EqualError(t, err, "The current CA Provider does not support cross-signing. "+
"You can try again with ForceWithoutCrossSigningSet but this may cause disruption"+
" - see documentation for more.")
}
@ -323,7 +319,7 @@ func TestConnectCAConfig_GetSetForceNoCrossSigning(t *testing.T) {
}
var reply interface{}
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply)
require.NoError(err)
require.NoError(t, err)
}
// Make sure the new root has been added but with no cross-signed intermediate
@ -332,23 +328,23 @@ func TestConnectCAConfig_GetSetForceNoCrossSigning(t *testing.T) {
Datacenter: "dc1",
}
var reply structs.IndexedCARoots
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply))
require.Len(reply.Roots, 2)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply))
require.Len(t, reply.Roots, 2)
for _, r := range reply.Roots {
if r.ID == oldRoot.ID {
// The old root should no longer be marked as the active root,
// and none of its other fields should have changed.
require.False(r.Active)
require.Equal(r.Name, oldRoot.Name)
require.Equal(r.RootCert, oldRoot.RootCert)
require.Equal(r.SigningCert, oldRoot.SigningCert)
require.Equal(r.IntermediateCerts, oldRoot.IntermediateCerts)
require.False(t, r.Active)
require.Equal(t, r.Name, oldRoot.Name)
require.Equal(t, r.RootCert, oldRoot.RootCert)
require.Equal(t, r.SigningCert, oldRoot.SigningCert)
require.Equal(t, r.IntermediateCerts, oldRoot.IntermediateCerts)
} else {
// The new root should NOT have a valid cross-signed cert from the old
// root as an intermediate.
require.True(r.Active)
require.Empty(r.IntermediateCerts)
require.True(t, r.Active)
require.Empty(t, r.IntermediateCerts)
}
}
}
@ -664,9 +660,6 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
t.Parallel()
assert := assert.New(t)
require := require.New(t)
// Initialize primary as the primary DC
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "primary"
@ -693,8 +686,8 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
// Capture the current root
rootList, activeRoot, err := getTestRoots(s1, "primary")
require.NoError(err)
require.Len(rootList.Roots, 1)
require.NoError(t, err)
require.Len(t, rootList.Roots, 1)
rootCert := activeRoot
testrpc.WaitForActiveCARoot(t, s1.RPC, "primary", rootCert)
@ -702,15 +695,15 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
// Capture the current intermediate
rootList, activeRoot, err = getTestRoots(s2, "secondary")
require.NoError(err)
require.Len(rootList.Roots, 1)
require.Len(activeRoot.IntermediateCerts, 1)
require.NoError(t, err)
require.Len(t, rootList.Roots, 1)
require.Len(t, activeRoot.IntermediateCerts, 1)
oldIntermediatePEM := activeRoot.IntermediateCerts[0]
// Update the secondary CA config to use a new private key, which should
// cause a re-signing with a new intermediate.
_, newKey, err := connect.GeneratePrivateKey()
assert.NoError(err)
assert.NoError(t, err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
@ -725,7 +718,7 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Make sure the new intermediate has replaced the old one in the active root,
@ -736,12 +729,12 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
Datacenter: "secondary",
}
var reply structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply))
require.Len(reply.Roots, 1)
require.Len(reply.Roots[0].IntermediateCerts, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply))
require.Len(t, reply.Roots, 1)
require.Len(t, reply.Roots[0].IntermediateCerts, 1)
newIntermediatePEM = reply.Roots[0].IntermediateCerts[0]
require.NotEqual(oldIntermediatePEM, newIntermediatePEM)
require.Equal(reply.Roots[0].RootCert, rootCert.RootCert)
require.NotEqual(t, oldIntermediatePEM, newIntermediatePEM)
require.Equal(t, reply.Roots[0].RootCert, rootCert.RootCert)
}
// Verify the new config was set.
@ -750,14 +743,14 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
Datacenter: "secondary",
}
var reply structs.CAConfiguration
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply))
actual, err := ca.ParseConsulCAConfig(reply.Config)
require.NoError(err)
require.NoError(t, err)
expected, err := ca.ParseConsulCAConfig(newConfig.Config)
require.NoError(err)
assert.Equal(reply.Provider, newConfig.Provider)
assert.Equal(actual, expected)
require.NoError(t, err)
assert.Equal(t, reply.Provider, newConfig.Provider)
assert.Equal(t, actual, expected)
}
// Verify that new leaf certs get the new intermediate bundled
@ -770,28 +763,28 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
CSR: csr,
}
var reply structs.IssuedCert
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply))
// Verify the leaf cert has the new intermediate.
{
roots := x509.NewCertPool()
assert.True(roots.AppendCertsFromPEM([]byte(rootCert.RootCert)))
assert.True(t, roots.AppendCertsFromPEM([]byte(rootCert.RootCert)))
leaf, err := connect.ParseCert(reply.CertPEM)
require.NoError(err)
require.NoError(t, err)
intermediates := x509.NewCertPool()
require.True(intermediates.AppendCertsFromPEM([]byte(newIntermediatePEM)))
require.True(t, intermediates.AppendCertsFromPEM([]byte(newIntermediatePEM)))
_, err = leaf.Verify(x509.VerifyOptions{
Roots: roots,
Intermediates: intermediates,
})
require.NoError(err)
require.NoError(t, err)
}
// Verify other fields
assert.Equal("web", reply.Service)
assert.Equal(spiffeId.URI().String(), reply.ServiceURI)
assert.Equal(t, "web", reply.Service)
assert.Equal(t, spiffeId.URI().String(), reply.ServiceURI)
}
// Update a minor field in the config that doesn't trigger an intermediate refresh.
@ -810,7 +803,7 @@ func TestConnectCAConfig_UpdateSecondary(t *testing.T) {
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
}
}
@ -840,8 +833,6 @@ func TestConnectCASign(t *testing.T) {
for _, tt := range tests {
t.Run(fmt.Sprintf("%s-%d", tt.caKeyType, tt.caKeyBits), func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(cfg *Config) {
cfg.PrimaryDatacenter = "dc1"
cfg.CAConfig.Config["PrivateKeyType"] = tt.caKeyType
@ -864,7 +855,7 @@ func TestConnectCASign(t *testing.T) {
CSR: csr,
}
var reply structs.IssuedCert
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply))
// Generate a second CSR and request signing
spiffeId2 := connect.TestSpiffeIDService(t, "web2")
@ -875,20 +866,20 @@ func TestConnectCASign(t *testing.T) {
}
var reply2 structs.IssuedCert
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply2))
require.True(reply2.ModifyIndex > reply.ModifyIndex)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply2))
require.True(t, reply2.ModifyIndex > reply.ModifyIndex)
// Get the current CA
state := s1.fsm.State()
_, ca, err := state.CARootActive(nil)
require.NoError(err)
require.NoError(t, err)
// Verify that the cert is signed by the CA
require.NoError(connect.ValidateLeaf(ca.RootCert, reply.CertPEM, nil))
require.NoError(t, connect.ValidateLeaf(ca.RootCert, reply.CertPEM, nil))
// Verify other fields
assert.Equal("web", reply.Service)
assert.Equal(spiffeId.URI().String(), reply.ServiceURI)
assert.Equal(t, "web", reply.Service)
assert.Equal(t, spiffeId.URI().String(), reply.ServiceURI)
})
}
}
@ -899,7 +890,6 @@ func TestConnectCASign(t *testing.T) {
func BenchmarkConnectCASign(b *testing.B) {
t := &testing.T{}
require := require.New(b)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -919,7 +909,9 @@ func BenchmarkConnectCASign(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply))
if err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply); err != nil {
b.Fatalf("err: %v", err)
}
}
}
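
The benchmark takes a slightly different route: instead of asserting through a throwaway `*testing.T`, the converted loop checks the error itself and fails via `b.Fatalf`, so a failure reports through, and stops, the benchmark that is actually running. A hedged sketch of the idiom (the RPC call is a stand-in):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func doCall() error { return nil } // stand-in for the RPC under benchmark

func BenchmarkCallExplicit(b *testing.B) {
	for n := 0; n < b.N; n++ {
		if err := doCall(); err != nil {
			b.Fatalf("err: %v", err) // fail through the running *testing.B
		}
	}
}

// *testing.B also satisfies require.TestingT, so the package-level helper
// works here too if the testify style is preferred.
func BenchmarkCallRequire(b *testing.B) {
	for n := 0; n < b.N; n++ {
		require.NoError(b, doCall())
	}
}
```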
@ -930,7 +922,6 @@ func TestConnectCASign_rateLimit(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.PrimaryDatacenter = "dc1"
@ -975,7 +966,7 @@ func TestConnectCASign_rateLimit(t *testing.T) {
} else if err.Error() == ErrRateLimited.Error() {
limitedCount++
} else {
require.NoError(err)
require.NoError(t, err)
}
}
// I've only ever seen this as 1/9 however if the test runs slowly on an
@ -985,8 +976,8 @@ func TestConnectCASign_rateLimit(t *testing.T) {
// check that some limiting is being applied. Note that we can't just measure
// the time it took to send them all and infer how many should have succeeded
// without some complex modeling of the token bucket algorithm.
require.Truef(successCount >= 1, "at least 1 CSR should have succeeded, got %d", successCount)
require.Truef(limitedCount >= 7, "at least 7 CSRs should have been rate limited, got %d", limitedCount)
require.Truef(t, successCount >= 1, "at least 1 CSR should have succeeded, got %d", successCount)
require.Truef(t, limitedCount >= 7, "at least 7 CSRs should have been rate limited, got %d", limitedCount)
}
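
The lower-bound assertions above exist because the server throttles CSR signing with a token bucket: the burst size lets a few requests through immediately and the refill rate paces the rest, so the exact success/limited split depends on scheduling. A self-contained illustration with golang.org/x/time/rate (the numbers are illustrative, not Consul's limiter settings):

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 1 token per second with a burst of 1: a tight loop of 9 attempts
	// usually sees one success and eight rejections.
	limiter := rate.NewLimiter(rate.Limit(1), 1)

	success, limited := 0, 0
	for i := 0; i < 9; i++ {
		if limiter.Allow() {
			success++
		} else {
			limited++
		}
	}
	// A slow run can let extra tokens accrue, which is why the test only
	// asserts lower bounds rather than exact counts.
	fmt.Printf("success=%d limited=%d\n", success, limited)
}
```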
func TestConnectCASign_concurrencyLimit(t *testing.T) {
@ -996,7 +987,6 @@ func TestConnectCASign_concurrencyLimit(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.PrimaryDatacenter = "dc1"
@ -1056,7 +1046,7 @@ func TestConnectCASign_concurrencyLimit(t *testing.T) {
} else if err.Error() == ErrRateLimited.Error() {
limitedCount++
} else {
require.NoError(err)
require.NoError(t, err)
}
}
@ -1095,7 +1085,7 @@ func TestConnectCASign_concurrencyLimit(t *testing.T) {
// requests were serialized.
t.Logf("min=%s, max=%s", minTime, maxTime)
//t.Fail() // Uncomment to see the time spread logged
require.Truef(successCount >= 1, "at least 1 CSR should have succeeded, got %d", successCount)
require.Truef(t, successCount >= 1, "at least 1 CSR should have succeeded, got %d", successCount)
}
func TestConnectCASignValidation(t *testing.T) {

View File

@ -541,7 +541,7 @@ func TestFederationState_List_ACLDeny(t *testing.T) {
gwListEmpty: true,
gwFilteredByACLs: true,
},
"master token": {
"initial management token": {
token: "root",
},
}

View File

@ -105,7 +105,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
Check: &structs.HealthCheck{
@ -170,7 +170,7 @@ func TestFSM_DeregisterService(t *testing.T) {
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
}
@ -296,7 +296,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
Check: &structs.HealthCheck{
@ -1101,10 +1101,9 @@ func TestFSM_Autopilot(t *testing.T) {
func TestFSM_Intention_CRUD(t *testing.T) {
t.Parallel()
assert := assert.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
assert.Nil(err)
assert.Nil(t, err)
// Create a new intention.
ixn := structs.IntentionRequest{
@ -1118,19 +1117,19 @@ func TestFSM_Intention_CRUD(t *testing.T) {
{
buf, err := structs.Encode(structs.IntentionRequestType, ixn)
assert.Nil(err)
assert.Nil(fsm.Apply(makeLog(buf)))
assert.Nil(t, err)
assert.Nil(t, fsm.Apply(makeLog(buf)))
}
// Verify it's in the state store.
{
_, _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID)
assert.Nil(err)
assert.Nil(t, err)
actual.CreateIndex, actual.ModifyIndex = 0, 0
actual.CreatedAt = ixn.Intention.CreatedAt
actual.UpdatedAt = ixn.Intention.UpdatedAt
assert.Equal(ixn.Intention, actual)
assert.Equal(t, ixn.Intention, actual)
}
// Make an update
@ -1138,44 +1137,43 @@ func TestFSM_Intention_CRUD(t *testing.T) {
ixn.Intention.SourceName = "api"
{
buf, err := structs.Encode(structs.IntentionRequestType, ixn)
assert.Nil(err)
assert.Nil(fsm.Apply(makeLog(buf)))
assert.Nil(t, err)
assert.Nil(t, fsm.Apply(makeLog(buf)))
}
// Verify the update.
{
_, _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID)
assert.Nil(err)
assert.Nil(t, err)
actual.CreateIndex, actual.ModifyIndex = 0, 0
actual.CreatedAt = ixn.Intention.CreatedAt
actual.UpdatedAt = ixn.Intention.UpdatedAt
assert.Equal(ixn.Intention, actual)
assert.Equal(t, ixn.Intention, actual)
}
// Delete
ixn.Op = structs.IntentionOpDelete
{
buf, err := structs.Encode(structs.IntentionRequestType, ixn)
assert.Nil(err)
assert.Nil(fsm.Apply(makeLog(buf)))
assert.Nil(t, err)
assert.Nil(t, fsm.Apply(makeLog(buf)))
}
// Make sure it's gone.
{
_, _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID)
assert.Nil(err)
assert.Nil(actual)
assert.Nil(t, err)
assert.Nil(t, actual)
}
}
func TestFSM_CAConfig(t *testing.T) {
t.Parallel()
assert := assert.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
assert.Nil(err)
assert.Nil(t, err)
// Set the autopilot config using a request.
req := structs.CARequest{
@ -1190,7 +1188,7 @@ func TestFSM_CAConfig(t *testing.T) {
},
}
buf, err := structs.Encode(structs.ConnectCARequestType, req)
assert.Nil(err)
assert.Nil(t, err)
resp := fsm.Apply(makeLog(buf))
if _, ok := resp.(error); ok {
t.Fatalf("bad: %v", resp)
@ -1231,7 +1229,7 @@ func TestFSM_CAConfig(t *testing.T) {
}
_, config, err = fsm.state.CAConfig(nil)
assert.Nil(err)
assert.Nil(t, err)
if config.Provider != "static" {
t.Fatalf("bad: %v", config.Provider)
}
@ -1240,10 +1238,9 @@ func TestFSM_CAConfig(t *testing.T) {
func TestFSM_CARoots(t *testing.T) {
t.Parallel()
assert := assert.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
assert.Nil(err)
assert.Nil(t, err)
// Roots
ca1 := connect.TestCA(t, nil)
@ -1258,25 +1255,24 @@ func TestFSM_CARoots(t *testing.T) {
{
buf, err := structs.Encode(structs.ConnectCARequestType, req)
assert.Nil(err)
assert.True(fsm.Apply(makeLog(buf)).(bool))
assert.Nil(t, err)
assert.True(t, fsm.Apply(makeLog(buf)).(bool))
}
// Verify it's in the state store.
{
_, roots, err := fsm.state.CARoots(nil)
assert.Nil(err)
assert.Len(roots, 2)
assert.Nil(t, err)
assert.Len(t, roots, 2)
}
}
func TestFSM_CABuiltinProvider(t *testing.T) {
t.Parallel()
assert := assert.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
assert.Nil(err)
assert.Nil(t, err)
// Provider state.
expected := &structs.CAConsulProviderState{
@ -1297,25 +1293,24 @@ func TestFSM_CABuiltinProvider(t *testing.T) {
{
buf, err := structs.Encode(structs.ConnectCARequestType, req)
assert.Nil(err)
assert.True(fsm.Apply(makeLog(buf)).(bool))
assert.Nil(t, err)
assert.True(t, fsm.Apply(makeLog(buf)).(bool))
}
// Verify it's in the state store.
{
_, state, err := fsm.state.CAProviderState("foo")
assert.Nil(err)
assert.Equal(expected, state)
assert.Nil(t, err)
assert.Equal(t, expected, state)
}
}
func TestFSM_ConfigEntry(t *testing.T) {
t.Parallel()
require := require.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
require.NoError(err)
require.NoError(t, err)
// Create a simple config entry
entry := &structs.ProxyConfigEntry{
@ -1335,7 +1330,7 @@ func TestFSM_ConfigEntry(t *testing.T) {
{
buf, err := structs.Encode(structs.ConfigEntryRequestType, req)
require.NoError(err)
require.NoError(t, err)
resp := fsm.Apply(makeLog(buf))
if _, ok := resp.(error); ok {
t.Fatalf("bad: %v", resp)
@ -1345,33 +1340,31 @@ func TestFSM_ConfigEntry(t *testing.T) {
// Verify it's in the state store.
{
_, config, err := fsm.state.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.NoError(t, err)
entry.RaftIndex.CreateIndex = 1
entry.RaftIndex.ModifyIndex = 1
require.Equal(entry, config)
require.Equal(t, entry, config)
}
}
func TestFSM_ConfigEntry_DeleteCAS(t *testing.T) {
t.Parallel()
require := require.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
require.NoError(err)
require.NoError(t, err)
// Create a simple config entry and write it to the state store.
entry := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "global",
}
require.NoError(fsm.state.EnsureConfigEntry(1, entry))
require.NoError(t, fsm.state.EnsureConfigEntry(1, entry))
// Raft index is populated by EnsureConfigEntry, hold on to it so that we can
// restore it later.
raftIndex := entry.RaftIndex
require.NotZero(raftIndex.ModifyIndex)
require.NotZero(t, raftIndex.ModifyIndex)
// Attempt a CAS delete with an invalid index.
entry = entry.Clone()
@ -1383,24 +1376,24 @@ func TestFSM_ConfigEntry_DeleteCAS(t *testing.T) {
Entry: entry,
}
buf, err := structs.Encode(structs.ConfigEntryRequestType, req)
require.NoError(err)
require.NoError(t, err)
// Expect to get boolean false back.
rsp := fsm.Apply(makeLog(buf))
didDelete, isBool := rsp.(bool)
require.True(isBool)
require.False(didDelete)
require.True(t, isBool)
require.False(t, didDelete)
// Attempt a CAS delete with a valid index.
entry.RaftIndex = raftIndex
buf, err = structs.Encode(structs.ConfigEntryRequestType, req)
require.NoError(err)
require.NoError(t, err)
// Expect to get boolean true back.
rsp = fsm.Apply(makeLog(buf))
didDelete, isBool = rsp.(bool)
require.True(isBool)
require.True(didDelete)
require.True(t, isBool)
require.True(t, didDelete)
}
// This adapts another test by chunking the encoded data and then performing
@ -1413,12 +1406,10 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
}
t.Parallel()
require := require.New(t)
assert := assert.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
require.NoError(err)
require.NoError(t, err)
var logOfLogs [][]*raft.Log
for i := 0; i < 10; i++ {
@ -1429,7 +1420,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
Check: &structs.HealthCheck{
@ -1442,7 +1433,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
}
buf, err := structs.Encode(structs.RegisterRequestType, req)
require.NoError(err)
require.NoError(t, err)
var logs []*raft.Log
@ -1453,7 +1444,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
NumChunks: uint32(len(buf)),
}
chunkBytes, err := proto.Marshal(chunkInfo)
require.NoError(err)
require.NoError(t, err)
logs = append(logs, &raft.Log{
Data: []byte{b},
@ -1468,41 +1459,41 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
// the full set, and out of order.
for _, logs := range logOfLogs {
resp := fsm.chunker.Apply(logs[8])
assert.Nil(resp)
assert.Nil(t, resp)
resp = fsm.chunker.Apply(logs[0])
assert.Nil(resp)
assert.Nil(t, resp)
resp = fsm.chunker.Apply(logs[3])
assert.Nil(resp)
assert.Nil(t, resp)
}
// Verify we are not registered
for i := 0; i < 10; i++ {
_, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil)
require.NoError(err)
assert.Nil(node)
require.NoError(t, err)
assert.Nil(t, node)
}
// Snapshot, restore elsewhere, apply the rest of the logs, make sure it
// looks right
snap, err := fsm.Snapshot()
require.NoError(err)
require.NoError(t, err)
defer snap.Release()
sinkBuf := bytes.NewBuffer(nil)
sink := &MockSink{sinkBuf, false}
err = snap.Persist(sink)
require.NoError(err)
require.NoError(t, err)
fsm2, err := New(nil, logger)
require.NoError(err)
require.NoError(t, err)
err = fsm2.Restore(sink)
require.NoError(err)
require.NoError(t, err)
// Verify we are still not registered
for i := 0; i < 10; i++ {
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil)
require.NoError(err)
assert.Nil(node)
require.NoError(t, err)
assert.Nil(t, node)
}
// Apply the rest of the logs
@ -1514,43 +1505,41 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
default:
resp = fsm2.chunker.Apply(log)
if i != len(logs)-1 {
assert.Nil(resp)
assert.Nil(t, resp)
}
}
}
_, ok := resp.(raftchunking.ChunkingSuccess)
assert.True(ok)
assert.True(t, ok)
}
// Verify we are registered
for i := 0; i < 10; i++ {
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil)
require.NoError(err)
assert.NotNil(node)
require.NoError(t, err)
assert.NotNil(t, node)
// Verify service registered
_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMetaInDefaultPartition())
require.NoError(err)
require.NotNil(services)
require.NoError(t, err)
require.NotNil(t, services)
_, ok := services.Services["db"]
assert.True(ok)
assert.True(t, ok)
// Verify check
_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil)
require.NoError(err)
require.NotNil(checks)
assert.Equal(string(checks[0].CheckID), "db")
require.NoError(t, err)
require.NotNil(t, checks)
assert.Equal(t, string(checks[0].CheckID), "db")
}
}
func TestFSM_Chunking_TermChange(t *testing.T) {
t.Parallel()
assert := assert.New(t)
require := require.New(t)
logger := testutil.Logger(t)
fsm, err := New(nil, logger)
require.NoError(err)
require.NoError(t, err)
req := structs.RegisterRequest{
Datacenter: "dc1",
@ -1559,7 +1548,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) {
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
Check: &structs.HealthCheck{
@ -1571,7 +1560,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) {
},
}
buf, err := structs.Encode(structs.RegisterRequestType, req)
require.NoError(err)
require.NoError(t, err)
// Only need two chunks to test this
chunks := [][]byte{
@ -1599,7 +1588,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) {
// We should see nil for both
for _, log := range logs {
resp := fsm.chunker.Apply(log)
assert.Nil(resp)
assert.Nil(t, resp)
}
// Now verify the other baseline, that when the term doesn't change we see
@ -1616,10 +1605,10 @@ func TestFSM_Chunking_TermChange(t *testing.T) {
for i, log := range logs {
resp := fsm.chunker.Apply(log)
if i == 0 {
assert.Nil(resp)
assert.Nil(t, resp)
}
if i == 1 {
assert.NotNil(resp)
assert.NotNil(t, resp)
}
}
}
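
Both chunking tests hinge on the same lifecycle: a large Raft entry is split into per-byte chunks tagged with a sequence number and the total count, and the operation is only applied once every piece has arrived within a single term. A dependency-free sketch of that reassembly bookkeeping, purely illustrative and not the go-raftchunking API:

```go
package main

import "fmt"

// chunk carries one byte of the payload plus enough metadata to know when
// the whole operation has been received.
type chunk struct {
	opNum uint64 // identifies the logical operation
	seq   int    // position of this piece
	total int    // number of pieces in the operation
	data  byte
}

// assembler buffers chunks per operation and returns the payload once complete.
type assembler struct {
	pending map[uint64][]*chunk
}

func (a *assembler) apply(c chunk) ([]byte, bool) {
	if a.pending == nil {
		a.pending = make(map[uint64][]*chunk)
	}
	buf := a.pending[c.opNum]
	if buf == nil {
		buf = make([]*chunk, c.total)
		a.pending[c.opNum] = buf
	}
	buf[c.seq] = &c

	out := make([]byte, 0, c.total)
	for _, piece := range buf {
		if piece == nil {
			return nil, false // still waiting; out-of-order delivery is fine
		}
		out = append(out, piece.data)
	}
	delete(a.pending, c.opNum)
	return out, true
}

func main() {
	payload := []byte("register request")
	var a assembler
	for i := len(payload) - 1; i >= 0; i-- { // deliver out of order
		if full, done := a.apply(chunk{opNum: 1, seq: i, total: len(payload), data: payload[i]}); done {
			fmt.Printf("reassembled: %q\n", full)
		}
	}
}
```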

View File

@ -979,7 +979,6 @@ func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -1020,7 +1019,7 @@ node "foo" {
Status: api.HealthPassing,
ServiceID: args.Service.ID,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// Register a service
args = structs.TestRegisterRequestProxy(t)
@ -1032,7 +1031,7 @@ node "foo" {
Status: api.HealthPassing,
ServiceID: args.Service.Service,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// Register a service
args = structs.TestRegisterRequestProxy(t)
@ -1044,7 +1043,7 @@ node "foo" {
Status: api.HealthPassing,
ServiceID: args.Service.Service,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
}
// List w/ token. This should disallow because we don't have permission
@ -1056,8 +1055,8 @@ node "foo" {
QueryOptions: structs.QueryOptions{Token: token},
}
var resp structs.IndexedCheckServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
assert.Len(resp.Nodes, 0)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
assert.Len(t, resp.Nodes, 0)
// List w/ token. This should work since we're requesting "foo", but should
// also only contain the proxies with names that adhere to our ACL.
@ -1067,8 +1066,8 @@ node "foo" {
ServiceName: "foo",
QueryOptions: structs.QueryOptions{Token: token},
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
assert.Len(resp.Nodes, 1)
assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
assert.Len(t, resp.Nodes, 1)
}
func TestHealth_ServiceNodes_Gateway(t *testing.T) {
@ -1432,8 +1431,6 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) {
t.Parallel()
require := require.New(t)
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
@ -1446,7 +1443,7 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) {
}
reply := structs.IndexedHealthChecks{}
err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply)
require.NoError(err)
require.NoError(t, err)
found := false
for _, chk := range reply.HealthChecks {
@ -1457,8 +1454,8 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) {
t.Fatalf("bad: %#v", reply.HealthChecks)
}
}
require.True(found, "bad: %#v", reply.HealthChecks)
require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.True(t, found, "bad: %#v", reply.HealthChecks)
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
@ -1474,8 +1471,6 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
t.Parallel()
require := require.New(t)
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
@ -1488,7 +1483,7 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
}
reply := structs.IndexedHealthChecks{}
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
require.NoError(err)
require.NoError(t, err)
found := false
for _, chk := range reply.HealthChecks {
@ -1497,14 +1492,14 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
break
}
}
require.True(found, "bad: %#v", reply.HealthChecks)
require.True(t, found, "bad: %#v", reply.HealthChecks)
opt.ServiceName = "bar"
reply = structs.IndexedHealthChecks{}
err = msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
require.NoError(err)
require.Empty(reply.HealthChecks)
require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, reply.HealthChecks)
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
@ -1520,8 +1515,6 @@ func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
t.Parallel()
require := require.New(t)
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
@ -1534,15 +1527,15 @@ func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
}
reply := structs.IndexedCheckServiceNodes{}
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
require.NoError(err)
require.Len(reply.Nodes, 1)
require.NoError(t, err)
require.Len(t, reply.Nodes, 1)
opt.ServiceName = "bar"
reply = structs.IndexedCheckServiceNodes{}
err = msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
require.NoError(err)
require.Empty(reply.Nodes)
require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, reply.Nodes)
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
@ -1558,8 +1551,6 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) {
t.Parallel()
require := require.New(t)
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
@ -1572,7 +1563,7 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) {
}
reply := structs.IndexedHealthChecks{}
err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &opt, &reply)
require.NoError(err)
require.NoError(t, err)
found := false
for _, chk := range reply.HealthChecks {
@ -1583,8 +1574,8 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) {
t.Fatalf("bad service 'bar': %#v", reply.HealthChecks)
}
}
require.True(found, "missing service 'foo': %#v", reply.HealthChecks)
require.True(reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.True(t, found, "missing service 'foo': %#v", reply.HealthChecks)
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves

View File

@ -111,7 +111,6 @@ func TestIntentionApply_defaultSourceType(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -135,8 +134,8 @@ func TestIntentionApply_defaultSourceType(t *testing.T) {
var reply string
// Create
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.NotEmpty(reply)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.NotEmpty(t, reply)
// Read
ixn.Intention.ID = reply
@ -146,10 +145,10 @@ func TestIntentionApply_defaultSourceType(t *testing.T) {
IntentionID: ixn.Intention.ID,
}
var resp structs.IndexedIntentions
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp))
require.Len(resp.Intentions, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp))
require.Len(t, resp.Intentions, 1)
actual := resp.Intentions[0]
require.Equal(structs.IntentionSourceConsul, actual.SourceType)
require.Equal(t, structs.IntentionSourceConsul, actual.SourceType)
}
}
@ -161,7 +160,6 @@ func TestIntentionApply_createWithID(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -184,8 +182,8 @@ func TestIntentionApply_createWithID(t *testing.T) {
// Create
err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
require.NotNil(err)
require.Contains(err, "ID must be empty")
require.NotNil(t, err)
require.Contains(t, err, "ID must be empty")
}
// Test basic updating
@ -282,7 +280,6 @@ func TestIntentionApply_updateNonExist(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -304,8 +301,8 @@ func TestIntentionApply_updateNonExist(t *testing.T) {
// Create
err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
require.NotNil(err)
require.Contains(err, "Cannot modify non-existent intention")
require.NotNil(t, err)
require.Contains(t, err, "Cannot modify non-existent intention")
}
// Test basic deleting
@ -316,7 +313,6 @@ func TestIntentionApply_deleteGood(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -346,13 +342,13 @@ func TestIntentionApply_deleteGood(t *testing.T) {
}, &reply), "Cannot delete non-existent intention")
// Create
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.NotEmpty(reply)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.NotEmpty(t, reply)
// Delete
ixn.Op = structs.IntentionOpDelete
ixn.Intention.ID = reply
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Read
ixn.Intention.ID = reply
@ -363,8 +359,8 @@ func TestIntentionApply_deleteGood(t *testing.T) {
}
var resp structs.IndexedIntentions
err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)
require.NotNil(err)
require.Contains(err, ErrIntentionNotFound.Error())
require.NotNil(t, err)
require.Contains(t, err, ErrIntentionNotFound.Error())
}
}
@ -863,7 +859,6 @@ func TestIntentionApply_aclDeny(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -895,11 +890,11 @@ service "foobar" {
// Create without a token should error since default deny
var reply string
err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
require.True(acl.IsErrPermissionDenied(err))
require.True(t, acl.IsErrPermissionDenied(err))
// Now add the token and try again.
ixn.WriteRequest.Token = token
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Read
ixn.Intention.ID = reply
@ -910,10 +905,10 @@ service "foobar" {
QueryOptions: structs.QueryOptions{Token: "root"},
}
var resp structs.IndexedIntentions
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp))
require.Len(resp.Intentions, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp))
require.Len(t, resp.Intentions, 1)
actual := resp.Intentions[0]
require.Equal(resp.Index, actual.ModifyIndex)
require.Equal(t, resp.Index, actual.ModifyIndex)
actual.CreateIndex, actual.ModifyIndex = 0, 0
actual.CreatedAt = ixn.Intention.CreatedAt
@ -921,7 +916,7 @@ service "foobar" {
actual.Hash = ixn.Intention.Hash
//nolint:staticcheck
ixn.Intention.UpdatePrecedence()
require.Equal(ixn.Intention, actual)
require.Equal(t, ixn.Intention, actual)
}
}
@ -937,17 +932,17 @@ func TestIntention_WildcardACLEnforcement(t *testing.T) {
// create some test policies.
writeToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service_prefix "" { policy = "deny" intentions = "write" }`)
writeToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "" { policy = "deny" intentions = "write" }`)
require.NoError(t, err)
readToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service_prefix "" { policy = "deny" intentions = "read" }`)
readToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "" { policy = "deny" intentions = "read" }`)
require.NoError(t, err)
exactToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service "*" { policy = "deny" intentions = "write" }`)
exactToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service "*" { policy = "deny" intentions = "write" }`)
require.NoError(t, err)
wildcardPrefixToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service_prefix "*" { policy = "deny" intentions = "write" }`)
wildcardPrefixToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "*" { policy = "deny" intentions = "write" }`)
require.NoError(t, err)
fooToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service "foo" { policy = "deny" intentions = "write" }`)
fooToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service "foo" { policy = "deny" intentions = "write" }`)
require.NoError(t, err)
denyToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service_prefix "" { policy = "deny" intentions = "deny" }`)
denyToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "" { policy = "deny" intentions = "deny" }`)
require.NoError(t, err)
doIntentionCreate := func(t *testing.T, token string, dest string, deny bool) string {
@ -1253,7 +1248,6 @@ func TestIntentionApply_aclDelete(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -1285,18 +1279,18 @@ service "foobar" {
// Create
var reply string
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Try to do a delete with no token; this should get rejected.
ixn.Op = structs.IntentionOpDelete
ixn.Intention.ID = reply
ixn.WriteRequest.Token = ""
err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
require.True(acl.IsErrPermissionDenied(err))
require.True(t, acl.IsErrPermissionDenied(err))
// Try again with the original token. This should go through.
ixn.WriteRequest.Token = token
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Verify it is gone
{
@ -1306,8 +1300,8 @@ service "foobar" {
}
var resp structs.IndexedIntentions
err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), ErrIntentionNotFound.Error())
require.NotNil(t, err)
require.Contains(t, err.Error(), ErrIntentionNotFound.Error())
}
}
@ -1319,7 +1313,6 @@ func TestIntentionApply_aclUpdate(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -1351,18 +1344,18 @@ service "foobar" {
// Create
var reply string
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Try to do an update without a token; this should get rejected.
ixn.Op = structs.IntentionOpUpdate
ixn.Intention.ID = reply
ixn.WriteRequest.Token = ""
err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
require.True(acl.IsErrPermissionDenied(err))
require.True(t, acl.IsErrPermissionDenied(err))
// Try again with the original token; this should go through.
ixn.WriteRequest.Token = token
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
}
// Test apply with a management token
@ -1373,7 +1366,6 @@ func TestIntentionApply_aclManagement(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -1398,16 +1390,16 @@ func TestIntentionApply_aclManagement(t *testing.T) {
// Create
var reply string
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
ixn.Intention.ID = reply
// Update
ixn.Op = structs.IntentionOpUpdate
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Delete
ixn.Op = structs.IntentionOpDelete
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
}
// Test update changing the name where an ACL won't allow it
@ -1418,7 +1410,6 @@ func TestIntentionApply_aclUpdateChange(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
@ -1450,7 +1441,7 @@ service "foobar" {
// Create
var reply string
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply))
// Try to do an update without a token; this should get rejected.
ixn.Op = structs.IntentionOpUpdate
@ -1458,7 +1449,7 @@ service "foobar" {
ixn.Intention.DestinationName = "foo"
ixn.WriteRequest.Token = token
err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
require.True(acl.IsErrPermissionDenied(err))
require.True(t, acl.IsErrPermissionDenied(err))
}
// Test reading with ACLs
@ -1570,7 +1561,6 @@ func TestIntentionList(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1585,9 +1575,9 @@ func TestIntentionList(t *testing.T) {
Datacenter: "dc1",
}
var resp structs.IndexedIntentions
require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
require.NotNil(resp.Intentions)
require.Len(resp.Intentions, 0)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
require.NotNil(t, resp.Intentions)
require.Len(t, resp.Intentions, 0)
}
}
@ -1599,7 +1589,7 @@ func TestIntentionList_acl(t *testing.T) {
t.Parallel()
dir1, s1 := testServerWithConfig(t, testServerACLConfig(nil))
dir1, s1 := testServerWithConfig(t, testServerACLConfig)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
@ -1607,7 +1597,7 @@ func TestIntentionList_acl(t *testing.T) {
waitForLeaderEstablishment(t, s1)
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service_prefix "foo" { policy = "write" }`)
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "foo" { policy = "write" }`)
require.NoError(t, err)
// Create a few records
@ -1620,7 +1610,7 @@ func TestIntentionList_acl(t *testing.T) {
ixn.Intention.SourceNS = "default"
ixn.Intention.DestinationNS = "default"
ixn.Intention.DestinationName = name
ixn.WriteRequest.Token = TestDefaultMasterToken
ixn.WriteRequest.Token = TestDefaultInitialManagementToken
// Create
var reply string
@ -1639,10 +1629,10 @@ func TestIntentionList_acl(t *testing.T) {
})
// Test with management token
t.Run("master-token", func(t *testing.T) {
t.Run("initial-management-token", func(t *testing.T) {
req := &structs.IntentionListRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Token: TestDefaultMasterToken},
QueryOptions: structs.QueryOptions{Token: TestDefaultInitialManagementToken},
}
var resp structs.IndexedIntentions
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
@ -1666,7 +1656,7 @@ func TestIntentionList_acl(t *testing.T) {
req := &structs.IntentionListRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{
Token: TestDefaultMasterToken,
Token: TestDefaultInitialManagementToken,
Filter: "DestinationName == foobar",
},
}
@ -1763,7 +1753,7 @@ func TestIntentionMatch_acl(t *testing.T) {
_, srv, codec := testACLServerWithConfig(t, nil, false)
waitForLeaderEstablishment(t, srv)
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service "bar" { policy = "write" }`)
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service "bar" { policy = "write" }`)
require.NoError(t, err)
// Create some records
@ -1781,7 +1771,7 @@ func TestIntentionMatch_acl(t *testing.T) {
Intention: structs.TestIntention(t),
}
ixn.Intention.DestinationName = v
ixn.WriteRequest.Token = TestDefaultMasterToken
ixn.WriteRequest.Token = TestDefaultInitialManagementToken
// Create
var reply string
@ -1993,7 +1983,7 @@ func TestIntentionCheck_match(t *testing.T) {
_, srv, codec := testACLServerWithConfig(t, nil, false)
waitForLeaderEstablishment(t, srv)
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `service "api" { policy = "read" }`)
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service "api" { policy = "read" }`)
require.NoError(t, err)
// Create some intentions
@ -2015,7 +2005,7 @@ func TestIntentionCheck_match(t *testing.T) {
DestinationName: v[1],
Action: structs.IntentionActionAllow,
},
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
}
// Create
var reply string

View File

@ -853,7 +853,6 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
}
t.Run("can read all", func(t *testing.T) {
require := require.New(t)
token := tokenWithRules(t, `
node_prefix "" {
@ -870,14 +869,13 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
}
var out structs.IndexedNodesWithGateways
err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out)
require.NoError(err)
require.NotEmpty(out.Nodes)
require.NotEmpty(out.Gateways)
require.False(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.NoError(t, err)
require.NotEmpty(t, out.Nodes)
require.NotEmpty(t, out.Gateways)
require.False(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
})
t.Run("cannot read service node", func(t *testing.T) {
require := require.New(t)
token := tokenWithRules(t, `
node "node1" {
@ -894,13 +892,12 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
}
var out structs.IndexedNodesWithGateways
err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out)
require.NoError(err)
require.Empty(out.Nodes)
require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, out.Nodes)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
})
t.Run("cannot read service", func(t *testing.T) {
require := require.New(t)
token := tokenWithRules(t, `
node "node1" {
@ -917,13 +914,12 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
}
var out structs.IndexedNodesWithGateways
err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out)
require.NoError(err)
require.Empty(out.Nodes)
require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, out.Nodes)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
})
t.Run("cannot read gateway node", func(t *testing.T) {
require := require.New(t)
token := tokenWithRules(t, `
node "node2" {
@ -940,13 +936,12 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
}
var out structs.IndexedNodesWithGateways
err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out)
require.NoError(err)
require.Empty(out.Gateways)
require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, out.Gateways)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
})
t.Run("cannot read gateway", func(t *testing.T) {
require := require.New(t)
token := tokenWithRules(t, `
node "node2" {
@ -963,9 +958,9 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
}
var out structs.IndexedNodesWithGateways
err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out)
require.NoError(err)
require.Empty(out.Gateways)
require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, out.Gateways)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
})
}
@ -1784,13 +1779,13 @@ func TestInternal_GatewayIntentions_aclDeny(t *testing.T) {
t.Skip("too slow for testing.Short")
}
dir1, s1 := testServerWithConfig(t, testServerACLConfig(nil))
dir1, s1 := testServerWithConfig(t, testServerACLConfig)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken))
testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
// Register terminating gateway and config entry linking it to postgres + redis
{
@ -1809,7 +1804,7 @@ func TestInternal_GatewayIntentions_aclDeny(t *testing.T) {
Status: api.HealthPassing,
ServiceID: "terminating-gateway",
},
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
}
var regOutput struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &regOutput))
@ -1834,7 +1829,7 @@ func TestInternal_GatewayIntentions_aclDeny(t *testing.T) {
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: args,
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
}
var configOutput bool
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &configOutput))
@ -1848,7 +1843,7 @@ func TestInternal_GatewayIntentions_aclDeny(t *testing.T) {
Datacenter: "dc1",
Op: structs.IntentionOpCreate,
Intention: structs.TestIntention(t),
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
}
req.Intention.SourceName = "api"
req.Intention.DestinationName = v
@ -1860,7 +1855,7 @@ func TestInternal_GatewayIntentions_aclDeny(t *testing.T) {
Datacenter: "dc1",
Op: structs.IntentionOpCreate,
Intention: structs.TestIntention(t),
WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken},
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
}
req.Intention.SourceName = v
req.Intention.DestinationName = "api"
@ -1868,7 +1863,7 @@ func TestInternal_GatewayIntentions_aclDeny(t *testing.T) {
}
}
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `
service_prefix "redis" { policy = "read" }
service_prefix "terminating-gateway" { policy = "read" }
`)
@ -2192,7 +2187,7 @@ func TestInternal_ServiceTopology_ACL(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = TestDefaultMasterToken
c.ACLInitialManagementToken = TestDefaultInitialManagementToken
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
@ -2215,10 +2210,10 @@ func TestInternal_ServiceTopology_ACL(t *testing.T) {
// web -> redis exact intention
// redis and redis-proxy on node zip
registerTestTopologyEntries(t, codec, TestDefaultMasterToken)
registerTestTopologyEntries(t, codec, TestDefaultInitialManagementToken)
// Token grants read to: foo/api, foo/api-proxy, bar/web, baz/web
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `
node_prefix "" { policy = "read" }
service_prefix "api" { policy = "read" }
service "web" { policy = "read" }
@ -2331,7 +2326,7 @@ func TestInternal_IntentionUpstreams_ACL(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = TestDefaultMasterToken
c.ACLInitialManagementToken = TestDefaultInitialManagementToken
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
@ -2349,11 +2344,11 @@ func TestInternal_IntentionUpstreams_ACL(t *testing.T) {
// Intentions
// * -> * (deny) intention
// web -> api (allow)
registerIntentionUpstreamEntries(t, codec, TestDefaultMasterToken)
registerIntentionUpstreamEntries(t, codec, TestDefaultInitialManagementToken)
t.Run("valid token", func(t *testing.T) {
// Token grants read to read api service
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `
service_prefix "api" { policy = "read" }
`)
require.NoError(t, err)
@ -2379,7 +2374,7 @@ service_prefix "api" { policy = "read" }
t.Run("invalid token filters results", func(t *testing.T) {
// Token grants read to read an unrelated service, mongo
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `
service_prefix "mongo" { policy = "read" }
`)
require.NoError(t, err)

View File

@ -31,6 +31,10 @@ var (
// assignment to be enabled.
minVirtualIPVersion = version.Must(version.NewVersion("1.11.0"))
// minVirtualIPTerminatingGatewayVersion is the minimum version for all Consul servers for virtual IP
// assignment to be enabled for terminating gateways.
minVirtualIPTerminatingGatewayVersion = version.Must(version.NewVersion("1.11.2"))
// virtualIPVersionCheckInterval is the frequency we check whether all servers meet
// the minimum version to enable virtual IP assignment for services.
virtualIPVersionCheckInterval = time.Minute
@ -125,7 +129,7 @@ func (s *Server) pruneCARoots() error {
func (s *Server) runVirtualIPVersionCheck(ctx context.Context) error {
// Return early if the flag is already set.
done, err := s.setVirtualIPVersionFlag()
done, err := s.setVirtualIPFlags()
if err != nil {
s.loggers.Named(logging.Connect).Warn("error enabling virtual IPs", "error", err)
}
@ -142,7 +146,7 @@ func (s *Server) runVirtualIPVersionCheck(ctx context.Context) error {
case <-ctx.Done():
return nil
case <-ticker.C:
done, err := s.setVirtualIPVersionFlag()
done, err := s.setVirtualIPFlags()
if err != nil {
s.loggers.Named(logging.Connect).Warn("error enabling virtual IPs", "error", err)
continue
@ -154,6 +158,19 @@ func (s *Server) runVirtualIPVersionCheck(ctx context.Context) error {
}
}
func (s *Server) setVirtualIPFlags() (bool, error) {
virtualIPFlag, err := s.setVirtualIPVersionFlag()
if err != nil {
return false, err
}
terminatingGatewayVirtualIPFlag, err := s.setVirtualIPTerminatingGatewayVersionFlag()
if err != nil {
return false, err
}
return virtualIPFlag && terminatingGatewayVirtualIPFlag, nil
}
func (s *Server) setVirtualIPVersionFlag() (bool, error) {
val, err := s.getSystemMetadata(structs.SystemMetadataVirtualIPsEnabled)
if err != nil {
@ -175,6 +192,27 @@ func (s *Server) setVirtualIPVersionFlag() (bool, error) {
return true, nil
}
func (s *Server) setVirtualIPTerminatingGatewayVersionFlag() (bool, error) {
val, err := s.getSystemMetadata(structs.SystemMetadataTermGatewayVirtualIPsEnabled)
if err != nil {
return false, err
}
if val != "" {
return true, nil
}
if ok, _ := ServersInDCMeetMinimumVersion(s, s.config.Datacenter, minVirtualIPTerminatingGatewayVersion); !ok {
return false, fmt.Errorf("can't allocate Virtual IPs for terminating gateways until all servers >= %s",
minVirtualIPTerminatingGatewayVersion.String())
}
if err := s.setSystemMetadataKey(structs.SystemMetadataTermGatewayVirtualIPsEnabled, "true"); err != nil {
return false, nil
}
return true, nil
}
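
Like the existing virtual-IP gate, the new flag only flips on once every server in the datacenter reports at least the minimum build, and the result is then persisted in system metadata so the check is not repeated. A standalone sketch of the version comparison with hashicorp/go-version (the server list and the metadata write are stand-ins for Consul's internals):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

// allServersMeetMinimum reports whether every advertised build satisfies min.
func allServersMeetMinimum(builds []string, min *version.Version) (bool, error) {
	for _, b := range builds {
		v, err := version.NewVersion(b)
		if err != nil {
			return false, err
		}
		if v.Compare(min) < 0 {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	min := version.Must(version.NewVersion("1.11.2"))
	ok, err := allServersMeetMinimum([]string{"1.11.2", "1.12.0"}, min)
	if err != nil {
		panic(err)
	}
	fmt.Println("enable terminating gateway virtual IPs:", ok) // true
}
```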
// retryLoopBackoff loops a given function indefinitely, backing off exponentially
// upon errors up to a maximum of maxRetryBackoff seconds.
func retryLoopBackoff(ctx context.Context, loopFn func() error, errFn func(error)) {

View File

@ -1463,28 +1463,22 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
connect.HackSANExtensionForCSR(csr)
root, err := provider.ActiveRoot()
if err != nil {
return nil, err
}
// Check if the root expired before using it to sign.
err = c.checkExpired(root)
// TODO: we store NotBefore and NotAfter on this struct, so we could avoid
// parsing the cert here.
err = c.checkExpired(caRoot.RootCert)
if err != nil {
return nil, fmt.Errorf("root expired: %w", err)
}
inter, err := provider.ActiveIntermediate()
if err != nil {
return nil, err
}
// Check if the intermediate expired before using it to sign.
err = c.checkExpired(inter)
if err != nil {
return nil, fmt.Errorf("intermediate expired: %w", err)
if c.isIntermediateUsedToSignLeaf() && len(caRoot.IntermediateCerts) > 0 {
inter := caRoot.IntermediateCerts[len(caRoot.IntermediateCerts)-1]
if err := c.checkExpired(inter); err != nil {
return nil, fmt.Errorf("intermediate expired: %w", err)
}
}
// All seems to be in order, actually sign it.
pem, err := provider.Sign(csr)
if err == ca.ErrRateLimited {
return nil, ErrRateLimited
@ -1498,11 +1492,6 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
pem = pem + ca.EnsureTrailingNewline(p)
}
// Append our local CA's intermediate if there is one.
if inter != root {
pem = pem + ca.EnsureTrailingNewline(inter)
}
modIdx, err := c.delegate.ApplyCALeafRequest()
if err != nil {
return nil, err
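
checkExpired is not shown in this hunk, but the TODO above hints at its shape: it parses the PEM it is handed and compares the validity window against the manager's clock. A hedged sketch of that kind of check, assuming a PEM-encoded certificate as input (not Consul's exact implementation):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"time"
)

// checkCertExpired parses a PEM certificate and verifies that now falls
// inside its NotBefore/NotAfter window.
func checkCertExpired(certPEM string, now time.Time) error {
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		return errors.New("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return fmt.Errorf("parsing certificate: %w", err)
	}
	if now.Before(cert.NotBefore) {
		return fmt.Errorf("certificate not valid until %s", cert.NotBefore)
	}
	if now.After(cert.NotAfter) {
		return fmt.Errorf("certificate expired at %s", cert.NotAfter)
	}
	return nil
}

func main() {
	// With a real root or intermediate PEM this returns nil while the
	// certificate is inside its validity window.
	fmt.Println(checkCertExpired("", time.Now())) // "no PEM block found"
}
```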

View File

@ -12,6 +12,7 @@ import (
"fmt"
"math/big"
"net/rpc"
"net/url"
"testing"
"time"
@ -131,11 +132,12 @@ func verifyLeafCert(t *testing.T, root *structs.CARoot, leafCertPEM string) {
}
type mockCAServerDelegate struct {
t *testing.T
config *Config
store *state.Store
primaryRoot *structs.CARoot
callbackCh chan string
t *testing.T
config *Config
store *state.Store
primaryRoot *structs.CARoot
secondaryIntermediate string
callbackCh chan string
}
func NewMockCAServerDelegate(t *testing.T, config *Config) *mockCAServerDelegate {
@ -198,7 +200,7 @@ func (m *mockCAServerDelegate) forwardDC(method, dc string, args interface{}, re
roots.ActiveRootID = m.primaryRoot.ID
case "ConnectCA.SignIntermediate":
r := reply.(*string)
*r = m.primaryRoot.RootCert
*r = m.secondaryIntermediate
default:
return fmt.Errorf("received call to unsupported method %q", method)
}
@ -305,13 +307,14 @@ func initTestManager(t *testing.T, manager *CAManager, delegate *mockCAServerDel
}
func TestCAManager_Initialize(t *testing.T) {
conf := DefaultConfig()
conf.ConnectEnabled = true
conf.PrimaryDatacenter = "dc1"
conf.Datacenter = "dc2"
delegate := NewMockCAServerDelegate(t, conf)
delegate.secondaryIntermediate = delegate.primaryRoot.RootCert
manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)
manager.providerShim = &mockCAProvider{
callbackCh: delegate.callbackCh,
rootPEM: delegate.primaryRoot.RootCert,
@ -356,6 +359,7 @@ func TestCAManager_UpdateConfigWhileRenewIntermediate(t *testing.T) {
conf.PrimaryDatacenter = "dc1"
conf.Datacenter = "dc2"
delegate := NewMockCAServerDelegate(t, conf)
delegate.secondaryIntermediate = delegate.primaryRoot.RootCert
manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)
manager.providerShim = &mockCAProvider{
callbackCh: delegate.callbackCh,
@ -400,7 +404,7 @@ func TestCAManager_UpdateConfigWhileRenewIntermediate(t *testing.T) {
require.EqualValues(t, caStateInitialized, manager.state)
}
func TestCAManager_SignLeafWithExpiredCert(t *testing.T) {
func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
@ -422,8 +426,10 @@ func TestCAManager_SignLeafWithExpiredCert(t *testing.T) {
{"root in the future", time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
}
for _, arg := range args {
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
require.NoError(t, err, "failed to generate key")
for _, arg := range args {
t.Run(arg.testName, func(t *testing.T) {
// No parallel execution because we change globals
// Set the interval and drift buffer low for renewing the cert.
@ -440,13 +446,15 @@ func TestCAManager_SignLeafWithExpiredCert(t *testing.T) {
conf.ConnectEnabled = true
conf.PrimaryDatacenter = "dc1"
conf.Datacenter = "dc2"
rootPEM := generateCertPEM(t, caPrivKey, arg.notBeforeRoot, arg.notAfterRoot)
intermediatePEM := generateCertPEM(t, caPrivKey, arg.notBeforeIntermediate, arg.notAfterIntermediate)
delegate := NewMockCAServerDelegate(t, conf)
delegate.primaryRoot.RootCert = rootPEM
delegate.secondaryIntermediate = intermediatePEM
manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)
err, rootPEM := generatePem(arg.notBeforeRoot, arg.notAfterRoot)
require.NoError(t, err)
err, intermediatePEM := generatePem(arg.notBeforeIntermediate, arg.notAfterIntermediate)
require.NoError(t, err)
manager.providerShim = &mockCAProvider{
callbackCh: delegate.callbackCh,
rootPEM: rootPEM,
@ -456,14 +464,13 @@ func TestCAManager_SignLeafWithExpiredCert(t *testing.T) {
// Simulate Wait half the TTL for the cert to need renewing.
manager.timeNow = func() time.Time {
return time.Now().Add(500 * time.Millisecond)
return time.Now().UTC().Add(500 * time.Millisecond)
}
// Call RenewIntermediate and then confirm the RPCs and provider calls
// happen in the expected order.
_, err = manager.SignCertificate(&x509.CertificateRequest{}, &connect.SpiffeIDAgent{})
_, err := manager.SignCertificate(&x509.CertificateRequest{}, &connect.SpiffeIDAgent{})
if arg.isError {
require.Error(t, err)
require.Contains(t, err.Error(), arg.errorMsg)
@ -474,7 +481,8 @@ func TestCAManager_SignLeafWithExpiredCert(t *testing.T) {
}
}
func generatePem(notBefore time.Time, notAfter time.Time) (error, string) {
func generateCertPEM(t *testing.T, caPrivKey *rsa.PrivateKey, notBefore time.Time, notAfter time.Time) string {
t.Helper()
ca := &x509.Certificate{
SerialNumber: big.NewInt(2019),
Subject: pkix.Name{
@ -491,27 +499,19 @@ func generatePem(notBefore time.Time, notAfter time.Time) (error, string) {
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
URIs: []*url.URL{connect.SpiffeIDAgent{Host: "foo"}.URI()},
}
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return err, ""
}
caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey)
if err != nil {
return err, ""
}
require.NoError(t, err, "failed to create cert")
caPEM := new(bytes.Buffer)
pem.Encode(caPEM, &pem.Block{
err = pem.Encode(caPEM, &pem.Block{
Type: "CERTIFICATE",
Bytes: caBytes,
})
caPrivKeyPEM := new(bytes.Buffer)
pem.Encode(caPrivKeyPEM, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(caPrivKey),
})
return err, caPEM.String()
require.NoError(t, err, "failed to encode")
return caPEM.String()
}
func TestCADelegateWithState_GenerateCASignRequest(t *testing.T) {

View File

@ -196,7 +196,7 @@ func TestCAManager_Initialize_Secondary(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(fmt.Sprintf("%s-%d", tc.keyType, tc.keyBits), func(t *testing.T) {
masterToken := "8a85f086-dd95-4178-b128-e10902767c5c"
initialManagementToken := "8a85f086-dd95-4178-b128-e10902767c5c"
// Initialize primary as the primary DC
dir1, s1 := testServerWithConfig(t, func(c *Config) {
@ -204,7 +204,7 @@ func TestCAManager_Initialize_Secondary(t *testing.T) {
c.PrimaryDatacenter = "primary"
c.Build = "1.6.0"
c.ACLsEnabled = true
c.ACLInitialManagementToken = masterToken
c.ACLInitialManagementToken = initialManagementToken
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
c.CAConfig.Config["PrivateKeyType"] = tc.keyType
c.CAConfig.Config["PrivateKeyBits"] = tc.keyBits
@ -213,7 +213,7 @@ func TestCAManager_Initialize_Secondary(t *testing.T) {
defer os.RemoveAll(dir1)
defer s1.Shutdown()
s1.tokens.UpdateAgentToken(masterToken, token.TokenSourceConfig)
s1.tokens.UpdateAgentToken(initialManagementToken, token.TokenSourceConfig)
testrpc.WaitForLeader(t, s1.RPC, "primary")
@ -232,8 +232,8 @@ func TestCAManager_Initialize_Secondary(t *testing.T) {
defer os.RemoveAll(dir2)
defer s2.Shutdown()
s2.tokens.UpdateAgentToken(masterToken, token.TokenSourceConfig)
s2.tokens.UpdateReplicationToken(masterToken, token.TokenSourceConfig)
s2.tokens.UpdateAgentToken(initialManagementToken, token.TokenSourceConfig)
s2.tokens.UpdateReplicationToken(initialManagementToken, token.TokenSourceConfig)
// Create the WAN link
joinWAN(t, s2, s1)
@ -327,7 +327,6 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
// no parallel execution because we change globals
patchIntermediateCertRenewInterval(t)
require := require.New(t)
testVault := ca.NewTestVaultServer(t)
@ -354,15 +353,15 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
store := s1.caManager.delegate.State()
_, activeRoot, err := store.CARootActive(nil)
require.NoError(err)
require.NoError(t, err)
t.Log("original SigningKeyID", activeRoot.SigningKeyID)
intermediatePEM := s1.caManager.getLeafSigningCertFromRoot(activeRoot)
intermediateCert, err := connect.ParseCert(intermediatePEM)
require.NoError(err)
require.NoError(t, err)
require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(intermediatePEM, s1.caManager.getLeafSigningCertFromRoot(activeRoot))
require.Equal(t, connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(t, intermediatePEM, s1.caManager.getLeafSigningCertFromRoot(activeRoot))
// Wait for dc1's intermediate to be refreshed.
retry.Run(t, func(r *retry.R) {
@ -382,12 +381,12 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
codec := rpcClient(t, s1)
roots := structs.IndexedCARoots{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(err)
require.Len(roots.Roots, 1)
require.NoError(t, err)
require.Len(t, roots.Roots, 1)
activeRoot = roots.Active()
require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(intermediatePEM, s1.caManager.getLeafSigningCertFromRoot(activeRoot))
require.Equal(t, connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(t, intermediatePEM, s1.caManager.getLeafSigningCertFromRoot(activeRoot))
// Have the new intermediate sign a leaf cert and make sure the chain is correct.
spiffeService := &connect.SpiffeIDService{
@ -401,7 +400,7 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
req := structs.CASignRequest{CSR: csr}
cert := structs.IssuedCert{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(err)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)
}
@ -425,7 +424,6 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
// no parallel execution because we change globals
patchIntermediateCertRenewInterval(t)
require := require.New(t)
_, s1 := testServerWithConfig(t, func(c *Config) {
c.Build = "1.6.0"
@ -469,15 +467,15 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
store := s2.fsm.State()
_, activeRoot, err := store.CARootActive(nil)
require.NoError(err)
require.NoError(t, err)
t.Log("original SigningKeyID", activeRoot.SigningKeyID)
intermediatePEM := s2.caManager.getLeafSigningCertFromRoot(activeRoot)
intermediateCert, err := connect.ParseCert(intermediatePEM)
require.NoError(err)
require.NoError(t, err)
require.Equal(intermediatePEM, s2.caManager.getLeafSigningCertFromRoot(activeRoot))
require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(t, intermediatePEM, s2.caManager.getLeafSigningCertFromRoot(activeRoot))
require.Equal(t, connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
// Wait for dc2's intermediate to be refreshed.
retry.Run(t, func(r *retry.R) {
@ -497,13 +495,13 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
codec := rpcClient(t, s2)
roots := structs.IndexedCARoots{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(err)
require.Len(roots.Roots, 1)
require.NoError(t, err)
require.Len(t, roots.Roots, 1)
_, activeRoot, err = store.CARootActive(nil)
require.NoError(err)
require.Equal(connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(intermediatePEM, s2.caManager.getLeafSigningCertFromRoot(activeRoot))
require.NoError(t, err)
require.Equal(t, connect.HexString(intermediateCert.SubjectKeyId), activeRoot.SigningKeyID)
require.Equal(t, intermediatePEM, s2.caManager.getLeafSigningCertFromRoot(activeRoot))
// Have dc2 sign a leaf cert and make sure the chain is correct.
spiffeService := &connect.SpiffeIDService{
@ -517,7 +515,7 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
req := structs.CASignRequest{CSR: csr}
cert := structs.IssuedCert{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(err)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)
}
@ -528,8 +526,6 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Build = "1.6.0"
c.PrimaryDatacenter = "dc1"
@ -555,15 +551,15 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
// Get the original intermediate
secondaryProvider, _ := getCAProviderWithLock(s2)
oldIntermediatePEM, err := secondaryProvider.ActiveIntermediate()
require.NoError(err)
require.NotEmpty(oldIntermediatePEM)
require.NoError(t, err)
require.NotEmpty(t, oldIntermediatePEM)
// Capture the current root
var originalRoot *structs.CARoot
{
rootList, activeRoot, err := getTestRoots(s1, "dc1")
require.NoError(err)
require.Len(rootList.Roots, 1)
require.NoError(t, err)
require.Len(t, rootList.Roots, 1)
originalRoot = activeRoot
}
@ -574,7 +570,7 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
// Update the provider config to use a new private key, which should
// cause a rotation.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
require.NoError(t, err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
@ -590,14 +586,14 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
}
var reply interface{}
require.NoError(s1.RPC("ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, s1.RPC("ConnectCA.ConfigurationSet", args, &reply))
}
var updatedRoot *structs.CARoot
{
rootList, activeRoot, err := getTestRoots(s1, "dc1")
require.NoError(err)
require.Len(rootList.Roots, 2)
require.NoError(t, err)
require.Len(t, rootList.Roots, 2)
updatedRoot = activeRoot
}
@ -613,17 +609,17 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
r.Fatal("not a new intermediate")
}
})
require.NoError(err)
require.NoError(t, err)
// Verify the root lists have been rotated in each DC's state store.
state1 := s1.fsm.State()
_, primaryRoot, err := state1.CARootActive(nil)
require.NoError(err)
require.NoError(t, err)
state2 := s2.fsm.State()
_, roots2, err := state2.CARoots(nil)
require.NoError(err)
require.Equal(2, len(roots2))
require.NoError(t, err)
require.Equal(t, 2, len(roots2))
newRoot := roots2[0]
oldRoot := roots2[1]
@ -631,10 +627,10 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
newRoot = roots2[1]
oldRoot = roots2[0]
}
require.False(oldRoot.Active)
require.True(newRoot.Active)
require.Equal(primaryRoot.ID, newRoot.ID)
require.Equal(primaryRoot.RootCert, newRoot.RootCert)
require.False(t, oldRoot.Active)
require.True(t, newRoot.Active)
require.Equal(t, primaryRoot.ID, newRoot.ID)
require.Equal(t, primaryRoot.RootCert, newRoot.RootCert)
// Get the new root from dc1 and validate a chain of:
// dc2 leaf -> dc2 intermediate -> dc1 root
@ -650,13 +646,13 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
raw, _ := connect.TestCSR(t, spiffeService)
leafCsr, err := connect.ParseCSR(raw)
require.NoError(err)
require.NoError(t, err)
leafPEM, err := secondaryProvider.Sign(leafCsr)
require.NoError(err)
require.NoError(t, err)
cert, err := connect.ParseCert(leafPEM)
require.NoError(err)
require.NoError(t, err)
// Check that the leaf signed by the new intermediate can be verified using the
// returned cert chain (signed intermediate + remote root).
@ -669,7 +665,7 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
Intermediates: intermediatePool,
Roots: rootPool,
})
require.NoError(err)
require.NoError(t, err)
}
func TestCAManager_Initialize_Vault_FixesSigningKeyID_Primary(t *testing.T) {
@ -1113,7 +1109,6 @@ func TestLeader_CARootPruning(t *testing.T) {
caRootPruneInterval = origPruneInterval
})
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1127,14 +1122,14 @@ func TestLeader_CARootPruning(t *testing.T) {
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(t, rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Update the provider config to use a new private key, which should
// cause a rotation.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
require.NoError(t, err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
@ -1151,22 +1146,22 @@ func TestLeader_CARootPruning(t *testing.T) {
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Should have 2 roots now.
_, roots, err := s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 2)
require.NoError(t, err)
require.Len(t, roots, 2)
time.Sleep(2 * time.Second)
// Now the old root should be pruned.
_, roots, err = s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 1)
require.True(roots[0].Active)
require.NotEqual(roots[0].ID, oldRoot.ID)
require.NoError(t, err)
require.Len(t, roots, 1)
require.True(t, roots[0].Active)
require.NotEqual(t, roots[0].ID, oldRoot.ID)
}
func TestConnectCA_ConfigurationSet_PersistsRoots(t *testing.T) {
@ -1176,7 +1171,6 @@ func TestConnectCA_ConfigurationSet_PersistsRoots(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1201,13 +1195,13 @@ func TestConnectCA_ConfigurationSet_PersistsRoots(t *testing.T) {
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(t, rootList.Roots, 1)
// Update the provider config to use a new private key, which should
// cause a rotation.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
require.NoError(t, err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
@ -1222,12 +1216,12 @@ func TestConnectCA_ConfigurationSet_PersistsRoots(t *testing.T) {
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Get the active root before leader change.
_, root := getCAProviderWithLock(s1)
require.Len(root.IntermediateCerts, 1)
require.Len(t, root.IntermediateCerts, 1)
// Force a leader change and make sure the root CA values are preserved.
s1.Leave()
@ -1310,17 +1304,16 @@ func TestParseCARoot(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require := require.New(t)
root, err := parseCARoot(tt.pem, "consul", "cluster")
if tt.wantErr {
require.Error(err)
require.Error(t, err)
return
}
require.NoError(err)
require.Equal(tt.wantSerial, root.SerialNumber)
require.Equal(strings.ToLower(tt.wantSigningKeyID), root.SigningKeyID)
require.Equal(tt.wantKeyType, root.PrivateKeyType)
require.Equal(tt.wantKeyBits, root.PrivateKeyBits)
require.NoError(t, err)
require.Equal(t, tt.wantSerial, root.SerialNumber)
require.Equal(t, strings.ToLower(tt.wantSigningKeyID), root.SigningKeyID)
require.Equal(t, tt.wantKeyType, root.PrivateKeyType)
require.Equal(t, tt.wantKeyBits, root.PrivateKeyBits)
})
}
}
@ -1491,7 +1484,6 @@ func TestCAManager_Initialize_BadCAConfigDoesNotPreventLeaderEstablishment(t *te
}
func TestConnectCA_ConfigurationSet_ForceWithoutCrossSigning(t *testing.T) {
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -1505,14 +1497,14 @@ func TestConnectCA_ConfigurationSet_ForceWithoutCrossSigning(t *testing.T) {
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(t, rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Update the provider config to use a new private key, which should
// cause a rotation.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
require.NoError(t, err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
@ -1530,18 +1522,18 @@ func TestConnectCA_ConfigurationSet_ForceWithoutCrossSigning(t *testing.T) {
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Old root should no longer be active.
_, roots, err := s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 2)
require.NoError(t, err)
require.Len(t, roots, 2)
for _, r := range roots {
if r.ID == oldRoot.ID {
require.False(r.Active)
require.False(t, r.Active)
} else {
require.True(r.Active)
require.True(t, r.Active)
}
}
}
@ -1549,7 +1541,6 @@ func TestConnectCA_ConfigurationSet_ForceWithoutCrossSigning(t *testing.T) {
func TestConnectCA_ConfigurationSet_Vault_ForceWithoutCrossSigning(t *testing.T) {
ca.SkipIfVaultNotPresent(t)
require := require.New(t)
testVault := ca.NewTestVaultServer(t)
defer testVault.Stop()
@ -1577,8 +1568,8 @@ func TestConnectCA_ConfigurationSet_Vault_ForceWithoutCrossSigning(t *testing.T)
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(t, rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Update the provider config to use a new PKI path, which should
@ -1600,18 +1591,18 @@ func TestConnectCA_ConfigurationSet_Vault_ForceWithoutCrossSigning(t *testing.T)
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Old root should no longer be active.
_, roots, err := s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 2)
require.NoError(t, err)
require.Len(t, roots, 2)
for _, r := range roots {
if r.ID == oldRoot.ID {
require.False(r.Active)
require.False(t, r.Active)
} else {
require.True(r.Active)
require.True(t, r.Active)
}
}
}

View File

@ -217,7 +217,6 @@ func TestLeader_ReplicateIntentions(t *testing.T) {
func TestLeader_batchLegacyIntentionUpdates(t *testing.T) {
t.Parallel()
assert := assert.New(t)
ixn1 := structs.TestIntention(t)
ixn1.ID = "ixn1"
ixn2 := structs.TestIntention(t)
@ -356,7 +355,7 @@ func TestLeader_batchLegacyIntentionUpdates(t *testing.T) {
for _, tc := range cases {
actual := batchLegacyIntentionUpdates(tc.deletes, tc.updates)
assert.Equal(tc.expected, actual)
assert.Equal(t, tc.expected, actual)
}
}

View File

@ -13,7 +13,6 @@ import (
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/connect/ca"
"github.com/hashicorp/consul/logging"
)
@ -55,27 +54,14 @@ func getRootCAExpiry(s *Server) (time.Duration, error) {
}
func signingCAExpiryMonitor(s *Server) CertExpirationMonitor {
isPrimary := s.config.Datacenter == s.config.PrimaryDatacenter
if isPrimary {
return CertExpirationMonitor{
Key: metricsKeyMeshActiveSigningCAExpiry,
Logger: s.logger.Named(logging.Connect),
Query: func() (time.Duration, error) {
provider, _ := s.caManager.getCAProvider()
if _, ok := provider.(ca.PrimaryUsesIntermediate); ok {
return getActiveIntermediateExpiry(s)
}
return getRootCAExpiry(s)
},
}
}
return CertExpirationMonitor{
Key: metricsKeyMeshActiveSigningCAExpiry,
Logger: s.logger.Named(logging.Connect),
Query: func() (time.Duration, error) {
return getActiveIntermediateExpiry(s)
if s.caManager.isIntermediateUsedToSignLeaf() {
return getActiveIntermediateExpiry(s)
}
return getRootCAExpiry(s)
},
}
}
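The monitor refactor above collapses two nearly identical `CertExpirationMonitor` constructions into one whose query decides, at each poll, whether the intermediate or the root certificate is the active signing cert. A reduced sketch of that pattern, with hypothetical stand-ins for the CA manager and the two expiry lookups:

```go
package example

import "time"

// expiryQuery mirrors the shape of CertExpirationMonitor.Query: a closure
// polled periodically to emit a time-until-expiry metric.
type expiryQuery func() (time.Duration, error)

// caManager is a stand-in exposing only the predicate the query needs.
type caManager struct{ intermediateSignsLeafs bool }

func (m *caManager) isIntermediateUsedToSignLeaf() bool { return m.intermediateSignsLeafs }

// newSigningExpiryQuery branches per poll instead of fixing the choice when
// the monitor is built, so a CA provider change is picked up automatically.
func newSigningExpiryQuery(m *caManager, intermediateExpiry, rootExpiry expiryQuery) expiryQuery {
	return func() (time.Duration, error) {
		if m.isIntermediateUsedToSignLeaf() {
			return intermediateExpiry()
		}
		return rootExpiry()
	}
}
```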

View File

@ -1162,15 +1162,15 @@ func TestLeader_ACL_Initialization(t *testing.T) {
t.Parallel()
tests := []struct {
name string
build string
master string
bootstrap bool
name string
build string
initialManagement string
bootstrap bool
}{
{"old version, no master", "0.8.0", "", true},
{"old version, master", "0.8.0", "root", false},
{"new version, no master", "0.9.1", "", true},
{"new version, master", "0.9.1", "root", false},
{"old version, no initial management", "0.8.0", "", true},
{"old version, initial management", "0.8.0", "root", false},
{"new version, no initial management", "0.9.1", "", true},
{"new version, initial management", "0.9.1", "root", false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@ -1180,17 +1180,17 @@ func TestLeader_ACL_Initialization(t *testing.T) {
c.Datacenter = "dc1"
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = tt.master
c.ACLInitialManagementToken = tt.initialManagement
}
dir1, s1 := testServerWithConfig(t, conf)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
if tt.master != "" {
_, master, err := s1.fsm.State().ACLTokenGetBySecret(nil, tt.master, nil)
if tt.initialManagement != "" {
_, initialManagement, err := s1.fsm.State().ACLTokenGetBySecret(nil, tt.initialManagement, nil)
require.NoError(t, err)
require.NotNil(t, master)
require.NotNil(t, initialManagement)
}
_, anon, err := s1.fsm.State().ACLTokenGetBySecret(nil, anonymousToken, nil)
@ -2134,7 +2134,7 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
c.Bootstrap = false
c.BootstrapExpect = 3
c.Datacenter = "dc1"
c.Build = "1.11.0"
c.Build = "1.11.2"
}
dir1, s1 := testServerWithConfig(t, conf)
defer os.RemoveAll(dir1)
@ -2163,6 +2163,10 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
_, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled)
require.NoError(t, err)
require.Nil(t, entry)
state = s1.fsm.State()
_, entry, err = state.SystemMetadataGet(nil, structs.SystemMetadataTermGatewayVirtualIPsEnabled)
require.NoError(t, err)
require.Nil(t, entry)
// Register a connect-native service and make sure we don't have a virtual IP yet.
err = state.EnsureRegistration(10, &structs.RegisterRequest{
@ -2181,6 +2185,35 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "", vip)
// Register a terminating gateway.
err = state.EnsureRegistration(11, &structs.RegisterRequest{
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "tgate1",
ID: "tgate1",
Kind: structs.ServiceKindTerminatingGateway,
},
})
require.NoError(t, err)
err = state.EnsureConfigEntry(12, &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "bar",
},
},
})
require.NoError(t, err)
// Make sure the service referenced in the terminating gateway config doesn't have
// a virtual IP yet.
vip, err = state.VirtualIPForService(structs.NewServiceName("bar", nil))
require.NoError(t, err)
require.Equal(t, "", vip)
// Leave s3 and wait for the version to get updated.
require.NoError(t, s3.Leave())
retry.Run(t, func(r *retry.R) {
@ -2188,6 +2221,10 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
require.NoError(r, err)
require.NotNil(r, entry)
require.Equal(r, "true", entry.Value)
_, entry, err = state.SystemMetadataGet(nil, structs.SystemMetadataTermGatewayVirtualIPsEnabled)
require.NoError(r, err)
require.NotNil(r, entry)
require.Equal(r, "true", entry.Value)
})
// Update the connect-native service - now there should be a virtual IP assigned.
@ -2206,6 +2243,34 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
vip, err = state.VirtualIPForService(structs.NewServiceName("api", nil))
require.NoError(t, err)
require.Equal(t, "240.0.0.1", vip)
// Update the terminating gateway config entry - now there should be a virtual IP assigned.
err = state.EnsureConfigEntry(21, &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "api",
},
{
Name: "baz",
},
},
})
require.NoError(t, err)
_, node, err := state.NodeService("bar", "tgate1", nil)
require.NoError(t, err)
sn := structs.ServiceName{Name: "api"}
key := structs.ServiceGatewayVirtualIPTag(sn)
require.Contains(t, node.TaggedAddresses, key)
require.Equal(t, node.TaggedAddresses[key].Address, "240.0.0.1")
// Make sure the baz service (only referenced in the config entry so far)
// has a virtual IP.
vip, err = state.VirtualIPForService(structs.NewServiceName("baz", nil))
require.NoError(t, err)
require.Equal(t, "240.0.0.2", vip)
}
func TestLeader_ACL_Initialization_AnonymousToken(t *testing.T) {

View File

@ -3,22 +3,21 @@ package consul
import (
"testing"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
)
func TestLoggerStore_Named(t *testing.T) {
t.Parallel()
require := require.New(t)
logger := testutil.Logger(t)
store := newLoggerStore(logger)
require.NotNil(store)
require.NotNil(t, store)
l1 := store.Named("test1")
l2 := store.Named("test2")
require.Truef(
l1 != l2,
require.Truef(t, l1 != l2,
"expected %p and %p to have a different memory address",
l1,
l2,
@ -27,16 +26,14 @@ func TestLoggerStore_Named(t *testing.T) {
func TestLoggerStore_NamedCache(t *testing.T) {
t.Parallel()
require := require.New(t)
logger := testutil.Logger(t)
store := newLoggerStore(logger)
require.NotNil(store)
require.NotNil(t, store)
l1 := store.Named("test")
l2 := store.Named("test")
require.Truef(
l1 == l2,
require.Truef(t, l1 == l2,
"expected %p and %p to have the same memory address",
l1,
l2,

View File

@ -222,7 +222,7 @@ func TestPreparedQuery_Apply_ACLDeny(t *testing.T) {
Datacenter: "dc1",
Op: structs.PreparedQueryCreate,
Query: &structs.PreparedQuery{
Name: "redis-master",
Name: "redis-primary",
Service: structs.ServiceQuery{
Service: "the-redis",
},
@ -503,7 +503,7 @@ func TestPreparedQuery_Apply_ForwardLeader(t *testing.T) {
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "redis",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
}
@ -853,7 +853,7 @@ func TestPreparedQuery_Get(t *testing.T) {
Datacenter: "dc1",
Op: structs.PreparedQueryCreate,
Query: &structs.PreparedQuery{
Name: "redis-master",
Name: "redis-primary",
Service: structs.ServiceQuery{
Service: "the-redis",
},
@ -1110,7 +1110,7 @@ func TestPreparedQuery_List(t *testing.T) {
Datacenter: "dc1",
Op: structs.PreparedQueryCreate,
Query: &structs.PreparedQuery{
Name: "redis-master",
Name: "redis-primary",
Token: "le-token",
Service: structs.ServiceQuery{
Service: "the-redis",
@ -2348,7 +2348,7 @@ func TestPreparedQuery_Execute_ForwardLeader(t *testing.T) {
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "redis",
Tags: []string{"master"},
Tags: []string{"primary"},
Port: 8000,
},
}
@ -2448,7 +2448,6 @@ func TestPreparedQuery_Execute_ConnectExact(t *testing.T) {
t.Parallel()
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
@ -2484,7 +2483,7 @@ func TestPreparedQuery_Execute_ConnectExact(t *testing.T) {
}
var reply struct{}
require.NoError(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply))
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply))
}
// The query, start with connect disabled
@ -2501,7 +2500,7 @@ func TestPreparedQuery_Execute_ConnectExact(t *testing.T) {
},
},
}
require.NoError(msgpackrpc.CallWithCodec(
require.NoError(t, msgpackrpc.CallWithCodec(
codec, "PreparedQuery.Apply", &query, &query.Query.ID))
// In the future we'll run updates
@ -2515,15 +2514,15 @@ func TestPreparedQuery_Execute_ConnectExact(t *testing.T) {
}
var reply structs.PreparedQueryExecuteResponse
require.NoError(msgpackrpc.CallWithCodec(
require.NoError(t, msgpackrpc.CallWithCodec(
codec, "PreparedQuery.Execute", &req, &reply))
// Result should have two because it omits the proxy whose name
// doesn't match the query.
require.Len(reply.Nodes, 2)
require.Equal(query.Query.Service.Service, reply.Service)
require.Equal(query.Query.DNS, reply.DNS)
require.True(reply.QueryMeta.KnownLeader, "queried leader")
require.Len(t, reply.Nodes, 2)
require.Equal(t, query.Query.Service.Service, reply.Service)
require.Equal(t, query.Query.DNS, reply.DNS)
require.True(t, reply.QueryMeta.KnownLeader, "queried leader")
}
// Run with the Connect setting specified on the request
@ -2535,31 +2534,31 @@ func TestPreparedQuery_Execute_ConnectExact(t *testing.T) {
}
var reply structs.PreparedQueryExecuteResponse
require.NoError(msgpackrpc.CallWithCodec(
require.NoError(t, msgpackrpc.CallWithCodec(
codec, "PreparedQuery.Execute", &req, &reply))
// Result should have two because we should get the native AND
// the proxy (since the destination matches our service name).
require.Len(reply.Nodes, 2)
require.Equal(query.Query.Service.Service, reply.Service)
require.Equal(query.Query.DNS, reply.DNS)
require.True(reply.QueryMeta.KnownLeader, "queried leader")
require.Len(t, reply.Nodes, 2)
require.Equal(t, query.Query.Service.Service, reply.Service)
require.Equal(t, query.Query.DNS, reply.DNS)
require.True(t, reply.QueryMeta.KnownLeader, "queried leader")
// Make sure the native is the first one
if !reply.Nodes[0].Service.Connect.Native {
reply.Nodes[0], reply.Nodes[1] = reply.Nodes[1], reply.Nodes[0]
}
require.True(reply.Nodes[0].Service.Connect.Native, "native")
require.Equal(reply.Service, reply.Nodes[0].Service.Service)
require.True(t, reply.Nodes[0].Service.Connect.Native, "native")
require.Equal(t, reply.Service, reply.Nodes[0].Service.Service)
require.Equal(structs.ServiceKindConnectProxy, reply.Nodes[1].Service.Kind)
require.Equal(reply.Service, reply.Nodes[1].Service.Proxy.DestinationServiceName)
require.Equal(t, structs.ServiceKindConnectProxy, reply.Nodes[1].Service.Kind)
require.Equal(t, reply.Service, reply.Nodes[1].Service.Proxy.DestinationServiceName)
}
// Update the query
query.Query.Service.Connect = true
require.NoError(msgpackrpc.CallWithCodec(
require.NoError(t, msgpackrpc.CallWithCodec(
codec, "PreparedQuery.Apply", &query, &query.Query.ID))
// Run the registered query.
@ -2570,31 +2569,31 @@ func TestPreparedQuery_Execute_ConnectExact(t *testing.T) {
}
var reply structs.PreparedQueryExecuteResponse
require.NoError(msgpackrpc.CallWithCodec(
require.NoError(t, msgpackrpc.CallWithCodec(
codec, "PreparedQuery.Execute", &req, &reply))
// Result should have two because we should get the native AND
// the proxy (since the destination matches our service name).
require.Len(reply.Nodes, 2)
require.Equal(query.Query.Service.Service, reply.Service)
require.Equal(query.Query.DNS, reply.DNS)
require.True(reply.QueryMeta.KnownLeader, "queried leader")
require.Len(t, reply.Nodes, 2)
require.Equal(t, query.Query.Service.Service, reply.Service)
require.Equal(t, query.Query.DNS, reply.DNS)
require.True(t, reply.QueryMeta.KnownLeader, "queried leader")
// Make sure the native is the first one
if !reply.Nodes[0].Service.Connect.Native {
reply.Nodes[0], reply.Nodes[1] = reply.Nodes[1], reply.Nodes[0]
}
require.True(reply.Nodes[0].Service.Connect.Native, "native")
require.Equal(reply.Service, reply.Nodes[0].Service.Service)
require.True(t, reply.Nodes[0].Service.Connect.Native, "native")
require.Equal(t, reply.Service, reply.Nodes[0].Service.Service)
require.Equal(structs.ServiceKindConnectProxy, reply.Nodes[1].Service.Kind)
require.Equal(reply.Service, reply.Nodes[1].Service.Proxy.DestinationServiceName)
require.Equal(t, structs.ServiceKindConnectProxy, reply.Nodes[1].Service.Kind)
require.Equal(t, reply.Service, reply.Nodes[1].Service.Proxy.DestinationServiceName)
}
// Unset the query
query.Query.Service.Connect = false
require.NoError(msgpackrpc.CallWithCodec(
require.NoError(t, msgpackrpc.CallWithCodec(
codec, "PreparedQuery.Apply", &query, &query.Query.ID))
}

View File

@ -233,9 +233,6 @@ func TestRPC_blockingQuery(t *testing.T) {
defer os.RemoveAll(dir)
defer s.Shutdown()
require := require.New(t)
assert := assert.New(t)
// Perform a non-blocking query. Note that it's significant that the meta has
// a zero index in response - the implied opts.MinQueryIndex is also zero but
// this should still not block.
@ -311,9 +308,9 @@ func TestRPC_blockingQuery(t *testing.T) {
calls++
return nil
}
require.NoError(s.blockingQuery(&opts, &meta, fn))
assert.Equal(1, calls)
assert.Equal(uint64(1), meta.Index,
require.NoError(t, s.blockingQuery(&opts, &meta, fn))
assert.Equal(t, 1, calls)
assert.Equal(t, uint64(1), meta.Index,
"expect fake index of 1 to force client to block on next update")
// Simulate client making next request
@ -322,12 +319,12 @@ func TestRPC_blockingQuery(t *testing.T) {
// This time we should block even though the func returns index 0 still
t0 := time.Now()
require.NoError(s.blockingQuery(&opts, &meta, fn))
require.NoError(t, s.blockingQuery(&opts, &meta, fn))
t1 := time.Now()
assert.Equal(2, calls)
assert.Equal(uint64(1), meta.Index,
assert.Equal(t, 2, calls)
assert.Equal(t, uint64(1), meta.Index,
"expect fake index of 1 to force client to block on next update")
assert.True(t1.Sub(t0) > 20*time.Millisecond,
assert.True(t, t1.Sub(t0) > 20*time.Millisecond,
"should have actually blocked waiting for timeout")
}
@ -382,13 +379,13 @@ func TestRPC_blockingQuery(t *testing.T) {
}
err := s.blockingQuery(&opts, &meta, fn)
require.NoError(err)
require.False(meta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be reset for unauthenticated calls")
require.NoError(t, err)
require.False(t, meta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be reset for unauthenticated calls")
})
t.Run("ResultsFilteredByACLs is honored for authenticated calls", func(t *testing.T) {
token, err := lib.GenerateUUID(nil)
require.NoError(err)
require.NoError(t, err)
opts := structs.QueryOptions{
Token: token,
@ -400,8 +397,8 @@ func TestRPC_blockingQuery(t *testing.T) {
}
err = s.blockingQuery(&opts, &meta, fn)
require.NoError(err)
require.True(meta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be honored for authenticated calls")
require.NoError(t, err)
require.True(t, meta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be honored for authenticated calls")
})
}

View File

@ -1347,7 +1347,7 @@ func (s *Server) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
// Let the caller peek at the reply.
if replyFn != nil {
if err := replyFn(&reply); err != nil {
return nil
return err
}
}
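The `return nil` → `return err` change above is a real fix: the old code invoked the caller-supplied `replyFn` but discarded its error, so a failed peek at the snapshot reply looked like success. A reduced sketch of the corrected shape (hypothetical reply type and callback):

```go
package example

import "errors"

type snapshotReply struct{ Index uint64 }

var errStaleSnapshot = errors.New("stale snapshot")

// peekReply lets the caller inspect the decoded reply before the body is
// streamed. Propagating the callback's error (rather than returning nil as
// the old code did) aborts the RPC when the peek rejects the reply.
func peekReply(replyFn func(*snapshotReply) error, r *snapshotReply) error {
	if replyFn != nil {
		if err := replyFn(r); err != nil {
			return err
		}
	}
	return nil
}

// checkReply rejects a zero-index reply; with the fix the caller now sees
// errStaleSnapshot instead of a silent nil.
func checkReply(r *snapshotReply) error {
	return peekReply(func(r *snapshotReply) error {
		if r.Index == 0 {
			return errStaleSnapshot
		}
		return nil
	}, r)
}
```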

View File

@ -35,7 +35,7 @@ import (
)
const (
TestDefaultMasterToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a"
TestDefaultInitialManagementToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a"
)
// testTLSCertificates Generates a TLS CA and server key/cert and returns them
@ -66,21 +66,12 @@ func testTLSCertificates(serverName string) (cert string, key string, cacert str
return cert, privateKey, ca, nil
}
// testServerACLConfig wraps another arbitrary Config altering callback
// to setup some common ACL configurations. A new callback func will
// be returned that has the original callback invoked after setting
// up all of the ACL configurations (so they can still be overridden)
func testServerACLConfig(cb func(*Config)) func(*Config) {
return func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = TestDefaultMasterToken
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
if cb != nil {
cb(c)
}
}
// testServerACLConfig sets up some common ACL configurations.
func testServerACLConfig(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = TestDefaultInitialManagementToken
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
}
func configureTLS(config *Config) {
@ -164,8 +155,6 @@ func testServerConfig(t *testing.T) (string, *Config) {
config.ServerHealthInterval = 50 * time.Millisecond
config.AutopilotInterval = 100 * time.Millisecond
config.Build = "1.7.2"
config.CoordinateUpdatePeriod = 100 * time.Millisecond
config.LeaveDrainTime = 1 * time.Millisecond
@ -187,14 +176,12 @@ func testServerConfig(t *testing.T) (string, *Config) {
return dir, config
}
// Deprecated: use testServerWithConfig instead. It does the same thing and more.
func testServer(t *testing.T) (string, *Server) {
return testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.PrimaryDatacenter = "dc1"
c.Bootstrap = true
})
return testServerWithConfig(t)
}
// Deprecated: use testServerWithConfig
func testServerDC(t *testing.T, dc string) (string, *Server) {
return testServerWithConfig(t, func(c *Config) {
c.Datacenter = dc
@ -202,6 +189,7 @@ func testServerDC(t *testing.T, dc string) (string, *Server) {
})
}
// Deprecated: use testServerWithConfig
func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Server) {
return testServerWithConfig(t, func(c *Config) {
c.Datacenter = dc
@ -210,6 +198,7 @@ func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Se
})
}
// Deprecated: use testServerWithConfig
func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
return testServerWithConfig(t, func(c *Config) {
c.Datacenter = dc
@ -218,16 +207,7 @@ func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
})
}
func testServerDCExpectNonVoter(t *testing.T, dc string, expect int) (string, *Server) {
return testServerWithConfig(t, func(c *Config) {
c.Datacenter = dc
c.Bootstrap = false
c.BootstrapExpect = expect
c.ReadReplica = true
})
}
func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *Server) {
var dir string
var srv *Server
@ -235,8 +215,8 @@ func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
var config *Config
dir, config = testServerConfig(t)
if cb != nil {
cb(config)
for _, fn := range configOpts {
fn(config)
}
// Apply config to copied fields because many tests only set the old
@ -257,16 +237,18 @@ func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
// cb is a function that can alter the test server's configuration prior to the server starting.
func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToken bool) (string, *Server, rpc.ClientCodec) {
dir, srv := testServerWithConfig(t, testServerACLConfig(cb))
t.Cleanup(func() { srv.Shutdown() })
opts := []func(*Config){testServerACLConfig}
if cb != nil {
opts = append(opts, cb)
}
dir, srv := testServerWithConfig(t, opts...)
if initReplicationToken {
// set up some tokens here so we get fewer warnings in the logs
srv.tokens.UpdateReplicationToken(TestDefaultMasterToken, token.TokenSourceConfig)
srv.tokens.UpdateReplicationToken(TestDefaultInitialManagementToken, token.TokenSourceConfig)
}
codec := rpcClient(t, srv)
t.Cleanup(func() { codec.Close() })
return dir, srv, codec
}
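The helpers above move from a single nullable `cb func(*Config)` to a variadic option list, so a fixed preset like `testServerACLConfig` composes with per-test overrides instead of wrapping them. A sketch of the pattern with simplified types (the real `Config` has far more fields):

```go
package example

// Config is a pared-down stand-in for the server configuration struct.
type Config struct {
	Datacenter                string
	ACLsEnabled               bool
	ACLInitialManagementToken string
}

// aclPreset plays the role of testServerACLConfig: a reusable option.
func aclPreset(c *Config) {
	c.ACLsEnabled = true
	c.ACLInitialManagementToken = "root"
}

// newTestConfig applies options in order, so a later per-test option can
// override anything an earlier preset set.
func newTestConfig(opts ...func(*Config)) *Config {
	c := &Config{Datacenter: "dc1"}
	for _, fn := range opts {
		fn(c)
	}
	return c
}

// Usage:
//   cfg := newTestConfig(aclPreset, func(c *Config) { c.Datacenter = "dc2" })
```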
@ -1284,7 +1266,11 @@ func TestServer_Expect_NonVoters(t *testing.T) {
}
t.Parallel()
dir1, s1 := testServerDCExpectNonVoter(t, "dc1", 2)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Bootstrap = false
c.BootstrapExpect = 2
c.ReadReplica = true
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()

View File

@ -420,7 +420,6 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) {
require.NoError(t, err)
t.Run("Get", func(t *testing.T) {
require := require.New(t)
req := &structs.SessionSpecificRequest{
Datacenter: "dc1",
@ -432,30 +431,29 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) {
var sessions structs.IndexedSessions
err := msgpackrpc.CallWithCodec(codec, "Session.Get", req, &sessions)
require.NoError(err)
require.Empty(sessions.Sessions)
require.True(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, sessions.Sessions)
require.True(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// ACL-restricted results included.
req.Token = allowedToken
err = msgpackrpc.CallWithCodec(codec, "Session.Get", req, &sessions)
require.NoError(err)
require.Len(sessions.Sessions, 1)
require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.NoError(t, err)
require.Len(t, sessions.Sessions, 1)
require.False(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
// Try to get a session that doesn't exist to make sure that's handled
// correctly by the filter (it will get passed a nil slice).
req.SessionID = "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
err = msgpackrpc.CallWithCodec(codec, "Session.Get", req, &sessions)
require.NoError(err)
require.Empty(sessions.Sessions)
require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.NoError(t, err)
require.Empty(t, sessions.Sessions)
require.False(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
})
t.Run("List", func(t *testing.T) {
require := require.New(t)
req := &structs.DCSpecificRequest{
Datacenter: "dc1",
@ -466,21 +464,20 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) {
var sessions structs.IndexedSessions
err := msgpackrpc.CallWithCodec(codec, "Session.List", req, &sessions)
require.NoError(err)
require.Empty(sessions.Sessions)
require.True(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, sessions.Sessions)
require.True(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// ACL-restricted results included.
req.Token = allowedToken
err = msgpackrpc.CallWithCodec(codec, "Session.List", req, &sessions)
require.NoError(err)
require.Len(sessions.Sessions, 1)
require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.NoError(t, err)
require.Len(t, sessions.Sessions, 1)
require.False(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
})
t.Run("NodeSessions", func(t *testing.T) {
require := require.New(t)
req := &structs.NodeSpecificRequest{
Datacenter: "dc1",
@ -492,17 +489,17 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) {
var sessions structs.IndexedSessions
err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", req, &sessions)
require.NoError(err)
require.Empty(sessions.Sessions)
require.True(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
require.NoError(t, err)
require.Empty(t, sessions.Sessions)
require.True(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
// ACL-restricted results included.
req.Token = allowedToken
err = msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", req, &sessions)
require.NoError(err)
require.Len(sessions.Sessions, 1)
require.False(sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
require.NoError(t, err)
require.Len(t, sessions.Sessions, 1)
require.False(t, sessions.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
})
}

View File

@ -787,6 +787,32 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
}
}
// If there's a terminating gateway config entry for this service, populate the tagged addresses
// with virtual IP mappings.
termGatewayVIPsSupported, err := terminatingGatewayVirtualIPsSupported(tx, nil)
if err != nil {
return err
}
if termGatewayVIPsSupported && svc.Kind == structs.ServiceKindTerminatingGateway {
_, conf, err := configEntryTxn(tx, nil, structs.TerminatingGateway, svc.Service, &svc.EnterpriseMeta)
if err != nil {
return fmt.Errorf("failed to retrieve terminating gateway config: %s", err)
}
if conf != nil {
termGatewayConf := conf.(*structs.TerminatingGatewayConfigEntry)
addrs, err := getTermGatewayVirtualIPs(tx, termGatewayConf.Services, &svc.EnterpriseMeta)
if err != nil {
return err
}
if svc.TaggedAddresses == nil {
svc.TaggedAddresses = make(map[string]structs.ServiceAddress)
}
for key, addr := range addrs {
svc.TaggedAddresses[key] = addr
}
}
}
// Create the service node entry and populate the indexes. Note that
// conversion doesn't populate any of the node-specific information.
// That's always populated when we read from the state store.
@ -939,6 +965,18 @@ func virtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, error) {
return entry.Value != "", nil
}
func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, error) {
_, entry, err := systemMetadataGetTxn(tx, ws, structs.SystemMetadataTermGatewayVirtualIPsEnabled)
if err != nil {
return false, fmt.Errorf("failed system metadata lookup: %s", err)
}
if entry == nil {
return false, nil
}
return entry.Value != "", nil
}
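Both `virtualIPsSupported` and the new `terminatingGatewayVirtualIPsSupported` gate the feature on a system-metadata entry that the leader writes once the whole cluster is on a new-enough version; an empty value means "not yet enabled". A hypothetical reduction of that gate:

```go
package example

import "fmt"

// metadataLookup abstracts the state-store read used by the
// *VirtualIPsSupported helpers; "" means the flag has not been written yet.
type metadataLookup func(key string) (string, error)

// featureEnabled reports whether a system-metadata feature flag is set.
func featureEnabled(get metadataLookup, key string) (bool, error) {
	v, err := get(key)
	if err != nil {
		return false, fmt.Errorf("failed system metadata lookup: %w", err)
	}
	return v != "", nil
}
```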
// Services returns all services along with a list of associated tags.
func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Services, error) {
tx := s.db.Txn(false)
@ -1697,7 +1735,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
if err := cleanupGatewayWildcards(tx, idx, svc); err != nil {
return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err)
}
if err := freeServiceVirtualIP(tx, svc.ServiceName, entMeta); err != nil {
if err := freeServiceVirtualIP(tx, svc.ServiceName, nil, entMeta); err != nil {
return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err)
}
if err := cleanupKindServiceName(tx, idx, svc.CompoundServiceName(), svc.ServiceKind); err != nil {
@ -1713,7 +1751,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
// freeServiceVirtualIP is used to free a virtual IP for a service after the last instance
// is removed.
func freeServiceVirtualIP(tx WriteTxn, svc string, entMeta *structs.EnterpriseMeta) error {
func freeServiceVirtualIP(tx WriteTxn, svc string, excludeGateway *structs.ServiceName, entMeta *structs.EnterpriseMeta) error {
supported, err := virtualIPsSupported(tx, nil)
if err != nil {
return err
@ -1722,7 +1760,28 @@ func freeServiceVirtualIP(tx WriteTxn, svc string, entMeta *structs.EnterpriseMe
return nil
}
// Don't deregister the virtual IP if at least one terminating gateway still references this service.
sn := structs.NewServiceName(svc, entMeta)
termGatewaySupported, err := terminatingGatewayVirtualIPsSupported(tx, nil)
if err != nil {
return err
}
if termGatewaySupported {
svcGateways, err := tx.Get(tableGatewayServices, indexService, sn)
if err != nil {
return fmt.Errorf("failed gateway lookup for %q: %s", sn.Name, err)
}
for service := svcGateways.Next(); service != nil; service = svcGateways.Next() {
if svc, ok := service.(*structs.GatewayService); ok && svc != nil {
ignoreGateway := excludeGateway == nil || !svc.Gateway.Matches(*excludeGateway)
if ignoreGateway && svc.GatewayKind == structs.ServiceKindTerminatingGateway {
return nil
}
}
}
}
serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn)
if err != nil {
return fmt.Errorf("failed service virtual IP lookup: %s", err)
@ -2862,6 +2921,18 @@ func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, en
return err
}
// Update terminating gateway service virtual IPs
vipsSupported, err := terminatingGatewayVirtualIPsSupported(tx, nil)
if err != nil {
return err
}
if vipsSupported && conf.GetKind() == structs.TerminatingGateway {
gatewayConf := conf.(*structs.TerminatingGatewayConfigEntry)
if err := updateTerminatingGatewayVirtualIPs(tx, idx, gatewayConf, entMeta); err != nil {
return err
}
}
// Delete all associated with gateway first, to avoid keeping mappings that were removed
sn := structs.NewServiceName(conf.GetName(), entMeta)
@ -2899,6 +2970,96 @@ func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, en
return nil
}
func getTermGatewayVirtualIPs(tx WriteTxn, services []structs.LinkedService, entMeta *structs.EnterpriseMeta) (map[string]structs.ServiceAddress, error) {
addrs := make(map[string]structs.ServiceAddress, len(services))
for _, s := range services {
sn := structs.ServiceName{Name: s.Name, EnterpriseMeta: *entMeta}
vip, err := assignServiceVirtualIP(tx, sn)
if err != nil {
return nil, err
}
key := structs.ServiceGatewayVirtualIPTag(sn)
addrs[key] = structs.ServiceAddress{Address: vip}
}
return addrs, nil
}
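`getTermGatewayVirtualIPs` assigns (or re-uses) one virtual IP per linked service and keys the gateway's tagged addresses with `ServiceGatewayVirtualIPTag`. A reduced sketch of the map-building step, using a trivial counter and a hypothetical key format in place of the state-store allocator and the real tag helper:

```go
package example

import "fmt"

type serviceAddress struct{ Address string }

// virtualIPTag is a stand-in for structs.ServiceGatewayVirtualIPTag; the
// real key format is defined there, not here.
func virtualIPTag(service string) string {
	return "virtual:" + service
}

// termGatewayVirtualIPs builds the tagged-address map for a terminating
// gateway from its linked service names, handing out addresses from a
// counter instead of assignServiceVirtualIP.
func termGatewayVirtualIPs(services []string) map[string]serviceAddress {
	addrs := make(map[string]serviceAddress, len(services))
	for i, name := range services {
		addrs[virtualIPTag(name)] = serviceAddress{
			Address: fmt.Sprintf("240.0.0.%d", i+1),
		}
	}
	return addrs
}
```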
func updateTerminatingGatewayVirtualIPs(tx WriteTxn, idx uint64, conf *structs.TerminatingGatewayConfigEntry, entMeta *structs.EnterpriseMeta) error {
// Build the current map of services with virtual IPs for this gateway
services := conf.Services
addrs, err := getTermGatewayVirtualIPs(tx, services, entMeta)
if err != nil {
return err
}
// Find any deleted service entries by comparing the new config entry to the existing one.
_, existing, err := configEntryTxn(tx, nil, conf.GetKind(), conf.GetName(), entMeta)
if err != nil {
return fmt.Errorf("failed to get config entry: %v", err)
}
var deletes []structs.ServiceName
cfg, ok := existing.(*structs.TerminatingGatewayConfigEntry)
if ok {
for _, s := range cfg.Services {
sn := structs.ServiceName{Name: s.Name, EnterpriseMeta: *entMeta}
key := structs.ServiceGatewayVirtualIPTag(sn)
if _, ok := addrs[key]; !ok {
deletes = append(deletes, sn)
}
}
}
q := Query{Value: conf.GetName(), EnterpriseMeta: *entMeta}
_, svcNodes, err := serviceNodesTxn(tx, nil, indexService, q)
if err != nil {
return err
}
// Update the tagged addrs for any existing instances of this terminating gateway.
for _, s := range svcNodes {
newAddrs := make(map[string]structs.ServiceAddress)
for key, addr := range s.ServiceTaggedAddresses {
if !strings.HasPrefix(key, structs.TaggedAddressVirtualIP+":") {
newAddrs[key] = addr
}
}
for key, addr := range addrs {
newAddrs[key] = addr
}
// Don't need to update the service record if it's a no-op.
if reflect.DeepEqual(newAddrs, s.ServiceTaggedAddresses) {
continue
}
newSN := s.PartialClone()
newSN.ServiceTaggedAddresses = newAddrs
newSN.ModifyIndex = idx
if err := catalogInsertService(tx, newSN); err != nil {
return err
}
}
// Check if we can delete any virtual IPs for the removed services.
gatewayName := structs.NewServiceName(conf.GetName(), entMeta)
for _, sn := range deletes {
// If there's no existing service nodes, attempt to free the virtual IP.
q := Query{Value: sn.Name, EnterpriseMeta: sn.EnterpriseMeta}
_, nodes, err := serviceNodesTxn(tx, nil, indexConnect, q)
if err != nil {
return err
}
if len(nodes) == 0 {
if err := freeServiceVirtualIP(tx, sn.Name, &gatewayName, &sn.EnterpriseMeta); err != nil {
return err
}
}
}
return nil
}
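`updateTerminatingGatewayVirtualIPs` finds services dropped from the config entry by checking each previously linked name against the freshly built address map, then frees their virtual IPs only if no registered instances remain. The set-difference step in isolation (hypothetical helper names):

```go
package example

// removedServices returns the names present in the previous linked-service
// list but absent from the newly computed tagged-address keys; these are the
// candidates whose virtual IPs may now be freed.
func removedServices(previous []string, newKeys map[string]struct{}, tag func(string) string) []string {
	var deletes []string
	for _, name := range previous {
		if _, ok := newKeys[tag(name)]; !ok {
			deletes = append(deletes, name)
		}
	}
	return deletes
}
```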
// ingressConfigGatewayServices constructs a list of GatewayService structs for
// insertion into the memdb table, specific to ingress gateways. The boolean
// returned indicates that there are no changes necessary to the memdb table.

View File

@ -315,8 +315,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
events = append(events, e)
}
for gatewayName, serviceChanges := range termGatewayChanges {
for serviceName, gsChange := range serviceChanges {
for gatewayName, svcChanges := range termGatewayChanges {
for serviceName, gsChange := range svcChanges {
gs := changeObject(gsChange.change).(*structs.GatewayService)
q := Query{
@ -355,6 +355,12 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
// Build service events and append them
for _, sn := range nodes {
tuple := newNodeServiceTupleFromServiceNode(sn)
// If we're already sending an event for the service, don't send another.
if _, ok := serviceChanges[tuple]; ok {
continue
}
e, err := newServiceHealthEventForService(tx, changes.Index, tuple)
if err != nil {
return nil, err

View File

@ -73,10 +73,7 @@ func TestServiceHealthSnapshot(t *testing.T) {
func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
store := NewStateStore(nil)
require.NoError(t, store.SystemMetadataSet(0, &structs.SystemMetadataEntry{
Key: structs.SystemMetadataVirtualIPsEnabled,
Value: "true",
}))
setVirtualIPFlags(t, store)
counter := newIndexCounter()
err := store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "db"))
@ -1101,28 +1098,34 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
evServiceTermingGateway("tgate1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
evServiceTermingGateway("srv1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2")),
evServiceTermingGateway("srv2"),
evTerminatingGatewayVirtualIPs("srv1", "srv2")),
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evNode2),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evNode2),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evNode2),
},
})
@ -1161,6 +1164,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
@ -1169,6 +1173,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
@ -1177,6 +1182,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
@ -1208,16 +1214,26 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway(""),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evServiceIndex(setupIndex)),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evServiceIndex(setupIndex)),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
},
})
run(t, eventsTestCase{
@ -1260,11 +1276,26 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway(""),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evServiceIndex(setupIndex)),
evTerminatingGatewayVirtualIPs("srv1", "srv2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
},
})
run(t, eventsTestCase{
@ -1307,10 +1338,25 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway(""),
evTerminatingGatewayVirtualIP("srv2", "240.0.0.2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
evServiceTermingGateway("srv1"),
evTerminatingGatewayVirtualIP("srv2", "240.0.0.2"),
evServiceMutatedModifyIndex),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evTerminatingGatewayVirtualIP("srv2", "240.0.0.2"),
evServiceIndex(setupIndex),
evServiceMutatedModifyIndex),
},
})
run(t, eventsTestCase{
@ -1327,12 +1373,12 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
err := s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
@ -1466,6 +1512,12 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
run(t, eventsTestCase{
Name: "rename a terminating gateway instance",
Setup: func(s *Store, tx *txn) error {
err := s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
if err != nil {
return err
}
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
@ -1477,7 +1529,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
err = ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
@ -1492,12 +1544,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
err = ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
Mutate: func(s *Store, tx *txn) error {
rename := func(req *structs.RegisterRequest) error {
@ -1511,14 +1558,16 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
evServiceTermingGateway(""),
evTerminatingGatewayVirtualIPs("srv1")),
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway(""),
evNodeUnchanged,
evServiceMutated,
evServiceChecksMutated,
evTerminatingGatewayRenamed("tgate2")),
evTerminatingGatewayRenamed("tgate2"),
evTerminatingGatewayVirtualIPs("srv1")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
@ -1564,15 +1613,18 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evServiceTermingGateway("")),
evServiceTermingGateway(""),
evTerminatingGatewayVirtualIPs("srv1", "srv2")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
evServiceTermingGateway("srv1"),
evTerminatingGatewayVirtualIPs("srv1", "srv2")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2")),
evServiceTermingGateway("srv2"),
evTerminatingGatewayVirtualIPs("srv1", "srv2")),
},
})
}
@ -1583,6 +1635,10 @@ func (tc eventsTestCase) run(t *testing.T) {
Key: structs.SystemMetadataVirtualIPsEnabled,
Value: "true",
}))
require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{
Key: structs.SystemMetadataTermGatewayVirtualIPsEnabled,
Value: "true",
}))
setupIndex := uint64(10)
mutateIndex := uint64(100)
@ -1636,6 +1692,14 @@ func evServiceTermingGateway(name string) func(e *stream.Event) error {
csn.Service.Kind = structs.ServiceKindTerminatingGateway
csn.Service.Port = 22000
sn := structs.NewServiceName(name, &csn.Service.EnterpriseMeta)
key := structs.ServiceGatewayVirtualIPTag(sn)
if name != "" && name != csn.Service.Service {
csn.Service.TaggedAddresses = map[string]structs.ServiceAddress{
key: {Address: "240.0.0.1"},
}
}
if e.Topic == topicServiceHealthConnect {
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.overrideKey = name
@ -1645,6 +1709,40 @@ func evServiceTermingGateway(name string) func(e *stream.Event) error {
}
}
func evTerminatingGatewayVirtualIP(name, addr string) func(e *stream.Event) error {
return func(e *stream.Event) error {
csn := getPayloadCheckServiceNode(e.Payload)
sn := structs.NewServiceName(name, &csn.Service.EnterpriseMeta)
key := structs.ServiceGatewayVirtualIPTag(sn)
csn.Service.TaggedAddresses = map[string]structs.ServiceAddress{
key: {Address: addr},
}
return nil
}
}
func evTerminatingGatewayVirtualIPs(names ...string) func(e *stream.Event) error {
return func(e *stream.Event) error {
csn := getPayloadCheckServiceNode(e.Payload)
if len(names) > 0 {
csn.Service.TaggedAddresses = make(map[string]structs.ServiceAddress)
}
for i, name := range names {
sn := structs.NewServiceName(name, &csn.Service.EnterpriseMeta)
key := structs.ServiceGatewayVirtualIPTag(sn)
csn.Service.TaggedAddresses[key] = structs.ServiceAddress{
Address: fmt.Sprintf("240.0.0.%d", i+1),
}
}
return nil
}
}
func evServiceIndex(idx uint64) func(e *stream.Event) error {
return func(e *stream.Event) error {
payload := e.Payload.(EventPayloadCheckServiceNode)
@ -2040,6 +2138,11 @@ func evServiceMutated(e *stream.Event) error {
return nil
}
func evServiceMutatedModifyIndex(e *stream.Event) error {
getPayloadCheckServiceNode(e.Payload).Service.ModifyIndex = 100
return nil
}
// evServiceChecksMutated option alters the base event service check to set its
// CreateIndex (but not modify index) to the setup index. This expresses that we
// expect the service check records originally created in setup to have been
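The `evTerminatingGatewayVirtualIPs` helper added in this file encodes the expectation that a terminating gateway carries one virtual-IP tagged address per linked service, numbered sequentially from 240.0.0.1. A self-contained sketch of that shape, where `virtualIPTag` and `ServiceAddress` are illustrative stand-ins for the real `structs` helpers rather than code from this commit:

```go
package main

import "fmt"

// ServiceAddress stands in for structs.ServiceAddress.
type ServiceAddress struct {
	Address string
	Port    int
}

// virtualIPTag stands in for structs.ServiceGatewayVirtualIPTag; the real key
// is derived from the linked service's name, not hard-coded like this.
func virtualIPTag(service string) string {
	return "virtual-ip:" + service
}

func main() {
	// A gateway linked to srv1 and srv2 is expected to expose one virtual IP
	// per linked service, assigned in order from the 240.0.0.0/4 range, which
	// is exactly what the test helper above builds into the expected events.
	linked := []string{"srv1", "srv2"}
	tagged := make(map[string]ServiceAddress, len(linked))
	for i, name := range linked {
		tagged[virtualIPTag(name)] = ServiceAddress{
			Address: fmt.Sprintf("240.0.0.%d", i+1),
		}
	}
	fmt.Println(tagged) // map[virtual-ip:srv1:{240.0.0.1 0} virtual-ip:srv2:{240.0.0.2 0}]
}
```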

View File

@ -1515,7 +1515,6 @@ func TestStateStore_EnsureService(t *testing.T) {
}
func TestStateStore_EnsureService_connectProxy(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Create the service registration.
@ -1535,26 +1534,22 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) {
// Service successfully registers into the state store.
testRegisterNode(t, s, 0, "node1")
assert.Nil(s.EnsureService(10, "node1", ns1))
assert.Nil(t, s.EnsureService(10, "node1", ns1))
// Retrieve and verify
_, out, err := s.NodeServices(nil, "node1", nil)
assert.Nil(err)
assert.NotNil(out)
assert.Len(out.Services, 1)
assert.Nil(t, err)
assert.NotNil(t, out)
assert.Len(t, out.Services, 1)
expect1 := *ns1
expect1.CreateIndex, expect1.ModifyIndex = 10, 10
assert.Equal(&expect1, out.Services["connect-proxy"])
assert.Equal(t, &expect1, out.Services["connect-proxy"])
}
func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{
Key: structs.SystemMetadataVirtualIPsEnabled,
Value: "true",
}))
setVirtualIPFlags(t, s)
// Create the service registration.
entMeta := structs.DefaultEnterpriseMetaInDefaultPartition()
@ -1578,17 +1573,17 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
// Make sure there's a virtual IP for the foo service.
vip, err := s.VirtualIPForService(structs.ServiceName{Name: "foo"})
require.NoError(t, err)
assert.Equal("240.0.0.1", vip)
assert.Equal(t, "240.0.0.1", vip)
// Retrieve and verify
_, out, err := s.NodeServices(nil, "node1", nil)
require.NoError(t, err)
assert.NotNil(out)
assert.Len(out.Services, 1)
assert.NotNil(t, out)
assert.Len(t, out.Services, 1)
taggedAddress := out.Services["foo"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns1.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns1.Port, taggedAddress.Port)
// Create the service registration.
ns2 := &structs.NodeService{
@ -1609,23 +1604,23 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
// Make sure the virtual IP has been incremented for the redis service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"})
require.NoError(t, err)
assert.Equal("240.0.0.2", vip)
assert.Equal(t, "240.0.0.2", vip)
// Retrieve and verify
_, out, err = s.NodeServices(nil, "node1", nil)
assert.Nil(err)
assert.NotNil(out)
assert.Len(out.Services, 2)
assert.Nil(t, err)
assert.NotNil(t, out)
assert.Len(t, out.Services, 2)
taggedAddress = out.Services["redis-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns2.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns2.Port, taggedAddress.Port)
// Delete the first service and make sure it no longer has a virtual IP assigned.
require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta))
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "connect-proxy"})
require.NoError(t, err)
assert.Equal("", vip)
assert.Equal(t, "", vip)
// Register another instance of redis-proxy and make sure the virtual IP is unchanged.
ns3 := &structs.NodeService{
@ -1646,14 +1641,14 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
// Make sure the virtual IP is unchanged for the redis service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"})
require.NoError(t, err)
assert.Equal("240.0.0.2", vip)
assert.Equal(t, "240.0.0.2", vip)
// Make sure the new instance has the same virtual IP.
_, out, err = s.NodeServices(nil, "node1", nil)
require.NoError(t, err)
taggedAddress = out.Services["redis-proxy2"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns3.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns3.Port, taggedAddress.Port)
// Register another service to take its virtual IP.
ns4 := &structs.NodeService{
@ -1674,23 +1669,19 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) {
// Make sure the virtual IP was allocated from the previously freed service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "web"})
require.NoError(t, err)
assert.Equal("240.0.0.1", vip)
assert.Equal(t, "240.0.0.1", vip)
// Retrieve and verify
_, out, err = s.NodeServices(nil, "node1", nil)
require.NoError(t, err)
taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns4.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns4.Port, taggedAddress.Port)
}
func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{
Key: structs.SystemMetadataVirtualIPsEnabled,
Value: "true",
}))
setVirtualIPFlags(t, s)
// Create the service registration.
entMeta := structs.DefaultEnterpriseMetaInDefaultPartition()
@ -1714,16 +1705,16 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
// Make sure there's a virtual IP for the foo service.
vip, err := s.VirtualIPForService(structs.ServiceName{Name: "foo"})
require.NoError(t, err)
assert.Equal("240.0.0.1", vip)
assert.Equal(t, "240.0.0.1", vip)
// Retrieve and verify
_, out, err := s.NodeServices(nil, "node1", nil)
require.NoError(t, err)
assert.NotNil(out)
assert.NotNil(t, out)
taggedAddress := out.Services["foo"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns1.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns1.Port, taggedAddress.Port)
// Create the service registration.
ns2 := &structs.NodeService{
@ -1744,22 +1735,22 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
// Make sure the virtual IP has been incremented for the redis service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"})
require.NoError(t, err)
assert.Equal("240.0.0.2", vip)
assert.Equal(t, "240.0.0.2", vip)
// Retrieve and verify
_, out, err = s.NodeServices(nil, "node1", nil)
assert.Nil(err)
assert.NotNil(out)
assert.Nil(t, err)
assert.NotNil(t, out)
taggedAddress = out.Services["redis"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns2.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns2.Port, taggedAddress.Port)
// Delete the last service and make sure it no longer has a virtual IP assigned.
require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta))
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"})
require.NoError(t, err)
assert.Equal("", vip)
assert.Equal(t, "", vip)
// Register a new service, should end up with the freed 240.0.0.2 address.
ns3 := &structs.NodeService{
@ -1779,16 +1770,16 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "backend"})
require.NoError(t, err)
assert.Equal("240.0.0.2", vip)
assert.Equal(t, "240.0.0.2", vip)
// Retrieve and verify
_, out, err = s.NodeServices(nil, "node1", nil)
assert.Nil(err)
assert.NotNil(out)
assert.Nil(t, err)
assert.NotNil(t, out)
taggedAddress = out.Services["backend"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns3.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns3.Port, taggedAddress.Port)
// Create a new service, no more freed VIPs so it should go back to using the counter.
ns4 := &structs.NodeService{
@ -1809,16 +1800,16 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
// Make sure the virtual IP has been incremented for the frontend service.
vip, err = s.VirtualIPForService(structs.ServiceName{Name: "frontend"})
require.NoError(t, err)
assert.Equal("240.0.0.3", vip)
assert.Equal(t, "240.0.0.3", vip)
// Retrieve and verify
_, out, err = s.NodeServices(nil, "node1", nil)
assert.Nil(err)
assert.NotNil(out)
assert.Nil(t, err)
assert.NotNil(t, out)
taggedAddress = out.Services["frontend"].TaggedAddresses[structs.TaggedAddressVirtualIP]
assert.Equal(vip, taggedAddress.Address)
assert.Equal(ns4.Port, taggedAddress.Port)
assert.Equal(t, vip, taggedAddress.Address)
assert.Equal(t, ns4.Port, taggedAddress.Port)
}
func TestStateStore_Services(t *testing.T) {
@ -2366,83 +2357,80 @@ func TestStateStore_DeleteService(t *testing.T) {
}
func TestStateStore_ConnectServiceNodes(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Listing with no results returns an empty list.
ws := memdb.NewWatchSet()
idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(0))
assert.Len(nodes, 0)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(0))
assert.Len(t, nodes, 0)
// Create some nodes and services.
assert.Nil(s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
assert.Nil(s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "native-db", Service: "db", Connect: structs.ServiceConnect{Native: true}}))
assert.Nil(s.EnsureService(17, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(t, s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
assert.Nil(t, s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(t, s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(t, s.EnsureService(16, "bar", &structs.NodeService{ID: "native-db", Service: "db", Connect: structs.ServiceConnect{Native: true}}))
assert.Nil(t, s.EnsureService(17, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.True(t, watchFired(ws))
// Read everything back.
ws = memdb.NewWatchSet()
idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(17))
assert.Len(nodes, 3)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(17))
assert.Len(t, nodes, 3)
for _, n := range nodes {
assert.True(
n.ServiceKind == structs.ServiceKindConnectProxy ||
n.ServiceConnect.Native,
assert.True(t, n.ServiceKind == structs.ServiceKindConnectProxy ||
n.ServiceConnect.Native,
"either proxy or connect native")
}
// Registering some unrelated node should not fire the watch.
testRegisterNode(t, s, 17, "nope")
assert.False(watchFired(ws))
assert.False(t, watchFired(ws))
// But removing a node with the "db" service should fire the watch.
assert.Nil(s.DeleteNode(18, "bar", nil))
assert.True(watchFired(ws))
assert.Nil(t, s.DeleteNode(18, "bar", nil))
assert.True(t, watchFired(ws))
}
func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Listing with no results returns an empty list.
ws := memdb.NewWatchSet()
idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(0))
assert.Len(nodes, 0)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(0))
assert.Len(t, nodes, 0)
// Create some nodes and services.
assert.Nil(s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
assert.Nil(t, s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(t, s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
// Typical services
assert.Nil(s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(14, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.False(watchFired(ws))
assert.Nil(t, s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(14, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.False(t, watchFired(ws))
// Register a sidecar for db
assert.Nil(s.EnsureService(15, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureService(15, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.True(t, watchFired(ws))
// Reset WatchSet to ensure watch fires when associating db with gateway
ws = memdb.NewWatchSet()
_, _, err = s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Nil(t, err)
// Associate gateway with db
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
assert.Nil(s.EnsureConfigEntry(17, &structs.TerminatingGatewayConfigEntry{
assert.Nil(t, s.EnsureService(16, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
assert.Nil(t, s.EnsureConfigEntry(17, &structs.TerminatingGatewayConfigEntry{
Kind: "terminating-gateway",
Name: "gateway",
Services: []structs.LinkedService{
@ -2451,71 +2439,71 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) {
},
},
}))
assert.True(watchFired(ws))
assert.True(t, watchFired(ws))
// Read everything back.
ws = memdb.NewWatchSet()
idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(17))
assert.Len(nodes, 2)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(17))
assert.Len(t, nodes, 2)
// Check sidecar
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal("foo", nodes[0].Node)
assert.Equal("proxy", nodes[0].ServiceName)
assert.Equal("proxy", nodes[0].ServiceID)
assert.Equal("db", nodes[0].ServiceProxy.DestinationServiceName)
assert.Equal(8000, nodes[0].ServicePort)
assert.Equal(t, structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(t, "foo", nodes[0].Node)
assert.Equal(t, "proxy", nodes[0].ServiceName)
assert.Equal(t, "proxy", nodes[0].ServiceID)
assert.Equal(t, "db", nodes[0].ServiceProxy.DestinationServiceName)
assert.Equal(t, 8000, nodes[0].ServicePort)
// Check gateway
assert.Equal(structs.ServiceKindTerminatingGateway, nodes[1].ServiceKind)
assert.Equal("bar", nodes[1].Node)
assert.Equal("gateway", nodes[1].ServiceName)
assert.Equal("gateway", nodes[1].ServiceID)
assert.Equal(443, nodes[1].ServicePort)
assert.Equal(t, structs.ServiceKindTerminatingGateway, nodes[1].ServiceKind)
assert.Equal(t, "bar", nodes[1].Node)
assert.Equal(t, "gateway", nodes[1].ServiceName)
assert.Equal(t, "gateway", nodes[1].ServiceID)
assert.Equal(t, 443, nodes[1].ServicePort)
// Watch should fire when another gateway instance is registered
assert.Nil(s.EnsureService(18, "foo", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway-2", Service: "gateway", Port: 443}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureService(18, "foo", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway-2", Service: "gateway", Port: 443}))
assert.True(t, watchFired(ws))
// Reset WatchSet to ensure watch fires when deregistering gateway
ws = memdb.NewWatchSet()
_, _, err = s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Nil(t, err)
// Watch should fire when a gateway instance is deregistered
assert.Nil(s.DeleteService(19, "bar", "gateway", nil))
assert.True(watchFired(ws))
assert.Nil(t, s.DeleteService(19, "bar", "gateway", nil))
assert.True(t, watchFired(ws))
ws = memdb.NewWatchSet()
idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(19))
assert.Len(nodes, 2)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(19))
assert.Len(t, nodes, 2)
// Check the new gateway
assert.Equal(structs.ServiceKindTerminatingGateway, nodes[1].ServiceKind)
assert.Equal("foo", nodes[1].Node)
assert.Equal("gateway", nodes[1].ServiceName)
assert.Equal("gateway-2", nodes[1].ServiceID)
assert.Equal(443, nodes[1].ServicePort)
assert.Equal(t, structs.ServiceKindTerminatingGateway, nodes[1].ServiceKind)
assert.Equal(t, "foo", nodes[1].Node)
assert.Equal(t, "gateway", nodes[1].ServiceName)
assert.Equal(t, "gateway-2", nodes[1].ServiceID)
assert.Equal(t, 443, nodes[1].ServicePort)
// Index should not slide back after deleting all instances of the gateway
assert.Nil(s.DeleteService(20, "foo", "gateway-2", nil))
assert.True(watchFired(ws))
assert.Nil(t, s.DeleteService(20, "foo", "gateway-2", nil))
assert.True(t, watchFired(ws))
idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(20))
assert.Len(nodes, 1)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(20))
assert.Len(t, nodes, 1)
// Ensure that remaining node is the proxy and not a gateway
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal("foo", nodes[0].Node)
assert.Equal("proxy", nodes[0].ServiceName)
assert.Equal("proxy", nodes[0].ServiceID)
assert.Equal(8000, nodes[0].ServicePort)
assert.Equal(t, structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
assert.Equal(t, "foo", nodes[0].Node)
assert.Equal(t, "proxy", nodes[0].ServiceName)
assert.Equal(t, "proxy", nodes[0].ServiceID)
assert.Equal(t, 8000, nodes[0].ServicePort)
}
func TestStateStore_Service_Snapshot(t *testing.T) {
@ -3686,14 +3674,12 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
tt.setupFn(s)
}
require := require.New(t)
// Run the query
ws := memdb.NewWatchSet()
_, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil)
require.NoError(err)
require.Len(res, tt.wantBeforeResLen)
require.Len(ws, tt.wantBeforeWatchSetSize)
require.NoError(t, err)
require.Len(t, res, tt.wantBeforeResLen)
require.Len(t, ws, tt.wantBeforeWatchSetSize)
// Mutate the state store
if tt.updateFn != nil {
@ -3702,18 +3688,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
fired := watchFired(ws)
if tt.shouldFire {
require.True(fired, "WatchSet should have fired")
require.True(t, fired, "WatchSet should have fired")
} else {
require.False(fired, "WatchSet should not have fired")
require.False(t, fired, "WatchSet should not have fired")
}
// Re-query the same result. Should return the desired index and len
ws = memdb.NewWatchSet()
idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil)
require.NoError(err)
require.Len(res, tt.wantAfterResLen)
require.Equal(tt.wantAfterIndex, idx)
require.Len(ws, tt.wantAfterWatchSetSize)
require.NoError(t, err)
require.Len(t, res, tt.wantAfterResLen)
require.Equal(t, tt.wantAfterIndex, idx)
require.Len(t, ws, tt.wantAfterWatchSetSize)
})
}
}
@ -3835,25 +3821,24 @@ func TestStateStore_CheckServiceNodes(t *testing.T) {
}
func TestStateStore_CheckConnectServiceNodes(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Listing with no results returns an empty list.
ws := memdb.NewWatchSet()
idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(0))
assert.Len(nodes, 0)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(0))
assert.Len(t, nodes, 0)
// Create some nodes and services.
assert.Nil(s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
assert.Nil(s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(t, s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
assert.Nil(t, s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(t, s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.Nil(t, s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.True(t, watchFired(ws))
// Register node checks
testRegisterCheck(t, s, 17, "foo", "", "check1", api.HealthPassing)
@ -3866,13 +3851,13 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) {
// Read everything back.
ws = memdb.NewWatchSet()
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(20))
assert.Len(nodes, 2)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(20))
assert.Len(t, nodes, 2)
for _, n := range nodes {
assert.Equal(structs.ServiceKindConnectProxy, n.Service.Kind)
assert.Equal("db", n.Service.Proxy.DestinationServiceName)
assert.Equal(t, structs.ServiceKindConnectProxy, n.Service.Kind)
assert.Equal(t, "db", n.Service.Proxy.DestinationServiceName)
}
}
@ -3881,34 +3866,33 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) {
t.Skip("too slow for testing.Short")
}
assert := assert.New(t)
s := testStateStore(t)
// Listing with no results returns an empty list.
ws := memdb.NewWatchSet()
idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(0))
assert.Len(nodes, 0)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(0))
assert.Len(t, nodes, 0)
// Create some nodes and services.
assert.Nil(s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
assert.Nil(t, s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
assert.Nil(t, s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
// Typical services
assert.Nil(s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(s.EnsureService(14, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.False(watchFired(ws))
assert.Nil(t, s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
assert.Nil(t, s.EnsureService(14, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
assert.False(t, watchFired(ws))
// Register node and service checks
testRegisterCheck(t, s, 15, "foo", "", "check1", api.HealthPassing)
testRegisterCheck(t, s, 16, "bar", "", "check2", api.HealthPassing)
testRegisterCheck(t, s, 17, "foo", "db", "check3", api.HealthPassing)
assert.False(watchFired(ws))
assert.False(t, watchFired(ws))
// Watch should fire when a gateway is associated with the service, even if the gateway doesn't exist yet
assert.Nil(s.EnsureConfigEntry(18, &structs.TerminatingGatewayConfigEntry{
assert.Nil(t, s.EnsureConfigEntry(18, &structs.TerminatingGatewayConfigEntry{
Kind: "terminating-gateway",
Name: "gateway",
Services: []structs.LinkedService{
@ -3917,90 +3901,90 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) {
},
},
}))
assert.True(watchFired(ws))
assert.True(t, watchFired(ws))
ws = memdb.NewWatchSet()
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(18))
assert.Len(nodes, 0)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(18))
assert.Len(t, nodes, 0)
// Watch should fire when a gateway is added
assert.Nil(s.EnsureService(19, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureService(19, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
assert.True(t, watchFired(ws))
// Watch should fire when a check is added to the gateway
testRegisterCheck(t, s, 20, "bar", "gateway", "check4", api.HealthPassing)
assert.True(watchFired(ws))
assert.True(t, watchFired(ws))
// Watch should fire when a different connect service is registered for db
assert.Nil(s.EnsureService(21, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureService(21, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
assert.True(t, watchFired(ws))
// Read everything back.
ws = memdb.NewWatchSet()
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(21))
assert.Len(nodes, 2)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(21))
assert.Len(t, nodes, 2)
// Check sidecar
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].Service.Kind)
assert.Equal("foo", nodes[0].Node.Node)
assert.Equal("proxy", nodes[0].Service.Service)
assert.Equal("proxy", nodes[0].Service.ID)
assert.Equal("db", nodes[0].Service.Proxy.DestinationServiceName)
assert.Equal(8000, nodes[0].Service.Port)
assert.Equal(t, structs.ServiceKindConnectProxy, nodes[0].Service.Kind)
assert.Equal(t, "foo", nodes[0].Node.Node)
assert.Equal(t, "proxy", nodes[0].Service.Service)
assert.Equal(t, "proxy", nodes[0].Service.ID)
assert.Equal(t, "db", nodes[0].Service.Proxy.DestinationServiceName)
assert.Equal(t, 8000, nodes[0].Service.Port)
// Check gateway
assert.Equal(structs.ServiceKindTerminatingGateway, nodes[1].Service.Kind)
assert.Equal("bar", nodes[1].Node.Node)
assert.Equal("gateway", nodes[1].Service.Service)
assert.Equal("gateway", nodes[1].Service.ID)
assert.Equal(443, nodes[1].Service.Port)
assert.Equal(t, structs.ServiceKindTerminatingGateway, nodes[1].Service.Kind)
assert.Equal(t, "bar", nodes[1].Node.Node)
assert.Equal(t, "gateway", nodes[1].Service.Service)
assert.Equal(t, "gateway", nodes[1].Service.ID)
assert.Equal(t, 443, nodes[1].Service.Port)
// Watch should fire when another gateway instance is registered
assert.Nil(s.EnsureService(22, "foo", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway-2", Service: "gateway", Port: 443}))
assert.True(watchFired(ws))
assert.Nil(t, s.EnsureService(22, "foo", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway-2", Service: "gateway", Port: 443}))
assert.True(t, watchFired(ws))
ws = memdb.NewWatchSet()
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(22))
assert.Len(nodes, 3)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(22))
assert.Len(t, nodes, 3)
// Watch should fire when a gateway instance is deregistered
assert.Nil(s.DeleteService(23, "bar", "gateway", nil))
assert.True(watchFired(ws))
assert.Nil(t, s.DeleteService(23, "bar", "gateway", nil))
assert.True(t, watchFired(ws))
ws = memdb.NewWatchSet()
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(23))
assert.Len(nodes, 2)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(23))
assert.Len(t, nodes, 2)
// Check new gateway
assert.Equal(structs.ServiceKindTerminatingGateway, nodes[1].Service.Kind)
assert.Equal("foo", nodes[1].Node.Node)
assert.Equal("gateway", nodes[1].Service.Service)
assert.Equal("gateway-2", nodes[1].Service.ID)
assert.Equal(443, nodes[1].Service.Port)
assert.Equal(t, structs.ServiceKindTerminatingGateway, nodes[1].Service.Kind)
assert.Equal(t, "foo", nodes[1].Node.Node)
assert.Equal(t, "gateway", nodes[1].Service.Service)
assert.Equal(t, "gateway-2", nodes[1].Service.ID)
assert.Equal(t, 443, nodes[1].Service.Port)
// Index should not slide back after deleting all instances of the gateway
assert.Nil(s.DeleteService(24, "foo", "gateway-2", nil))
assert.True(watchFired(ws))
assert.Nil(t, s.DeleteService(24, "foo", "gateway-2", nil))
assert.True(t, watchFired(ws))
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
assert.Nil(err)
assert.Equal(idx, uint64(24))
assert.Len(nodes, 1)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(24))
assert.Len(t, nodes, 1)
// Ensure that remaining node is the proxy and not a gateway
assert.Equal(structs.ServiceKindConnectProxy, nodes[0].Service.Kind)
assert.Equal("foo", nodes[0].Node.Node)
assert.Equal("proxy", nodes[0].Service.Service)
assert.Equal("proxy", nodes[0].Service.ID)
assert.Equal(8000, nodes[0].Service.Port)
assert.Equal(t, structs.ServiceKindConnectProxy, nodes[0].Service.Kind)
assert.Equal(t, "foo", nodes[0].Node.Node)
assert.Equal(t, "proxy", nodes[0].Service.Service)
assert.Equal(t, "proxy", nodes[0].Service.ID)
assert.Equal(t, 8000, nodes[0].Service.Port)
}
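These gateway tests lean heavily on go-memdb watch sets: a query records what it read into the `WatchSet` it is handed, and `watchFired` reports whether a later write invalidated any of that data. A condensed sketch of the non-firing/firing sequence asserted above; it assumes this package's test helpers (`testStateStore`, `watchFired`) and imports:

```go
func TestCheckConnectServiceNodes_WatchSketch(t *testing.T) {
	s := testStateStore(t)

	// The query registers its dependencies on the supplied WatchSet.
	ws := memdb.NewWatchSet()
	_, _, err := s.CheckConnectServiceNodes(ws, "db", nil)
	require.NoError(t, err)

	// Plain (non-connect) registrations for "db" leave the watch untouched...
	require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "node1", Address: "127.0.0.1"}))
	require.NoError(t, s.EnsureService(2, "node1", &structs.NodeService{ID: "db", Service: "db", Port: 5000}))
	require.False(t, watchFired(ws))

	// ...while a connect proxy whose destination is "db" invalidates it.
	require.NoError(t, s.EnsureService(3, "node1", &structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "db-sidecar",
		Service: "db-sidecar",
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "db"},
		Port:    8000,
	}))
	require.True(t, watchFired(ws))
}
```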
func BenchmarkCheckServiceNodes(b *testing.B) {
@ -5261,14 +5245,13 @@ func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) {
func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
s := testStateStore(t)
ws := setupIngressState(t, s)
require := require.New(t)
t.Run("check service1 ingress gateway", func(t *testing.T) {
idx, results, err := s.CheckIngressServiceNodes(ws, "service1", nil)
require.NoError(err)
require.Equal(uint64(15), idx)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
// Multiple instances of the ingress2 service
require.Len(results, 4)
require.Len(t, results, 4)
ids := make(map[string]struct{})
for _, n := range results {
@ -5279,14 +5262,14 @@ func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
"ingress2": {},
"wildcardIngress": {},
}
require.Equal(expectedIds, ids)
require.Equal(t, expectedIds, ids)
})
t.Run("check service2 ingress gateway", func(t *testing.T) {
idx, results, err := s.CheckIngressServiceNodes(ws, "service2", nil)
require.NoError(err)
require.Equal(uint64(15), idx)
require.Len(results, 2)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Len(t, results, 2)
ids := make(map[string]struct{})
for _, n := range results {
@ -5296,38 +5279,38 @@ func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
"ingress1": {},
"wildcardIngress": {},
}
require.Equal(expectedIds, ids)
require.Equal(t, expectedIds, ids)
})
t.Run("check service3 ingress gateway", func(t *testing.T) {
ws := memdb.NewWatchSet()
idx, results, err := s.CheckIngressServiceNodes(ws, "service3", nil)
require.NoError(err)
require.Equal(uint64(15), idx)
require.Len(results, 1)
require.Equal("wildcardIngress", results[0].Service.ID)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Len(t, results, 1)
require.Equal(t, "wildcardIngress", results[0].Service.ID)
})
t.Run("delete a wildcard entry", func(t *testing.T) {
require.Nil(s.DeleteConfigEntry(19, "ingress-gateway", "wildcardIngress", nil))
require.True(watchFired(ws))
require.Nil(t, s.DeleteConfigEntry(19, "ingress-gateway", "wildcardIngress", nil))
require.True(t, watchFired(ws))
idx, results, err := s.CheckIngressServiceNodes(ws, "service1", nil)
require.NoError(err)
require.Equal(uint64(15), idx)
require.Len(results, 3)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Len(t, results, 3)
idx, results, err = s.CheckIngressServiceNodes(ws, "service2", nil)
require.NoError(err)
require.Equal(uint64(15), idx)
require.Len(results, 1)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
require.Len(t, results, 1)
idx, results, err = s.CheckIngressServiceNodes(ws, "service3", nil)
require.NoError(err)
require.Equal(uint64(15), idx)
require.NoError(t, err)
require.Equal(t, uint64(15), idx)
// TODO(ingress): index goes backward when deleting last config entry
// require.Equal(uint64(11), idx)
require.Len(results, 0)
// require.Equal(t, uint64(11), idx)
require.Len(t, results, 0)
})
}
@ -5635,56 +5618,55 @@ func TestStateStore_GatewayServices_WildcardAssociation(t *testing.T) {
s := testStateStore(t)
setupIngressState(t, s)
require := require.New(t)
ws := memdb.NewWatchSet()
t.Run("base case for wildcard", func(t *testing.T) {
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(err)
require.Equal(uint64(16), idx)
require.Len(results, 3)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
})
t.Run("do not associate ingress services with gateway", func(t *testing.T) {
testRegisterIngressService(t, s, 17, "node1", "testIngress")
require.False(watchFired(ws))
require.False(t, watchFired(ws))
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(err)
require.Equal(uint64(16), idx)
require.Len(results, 3)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
})
t.Run("do not associate terminating-gateway services with gateway", func(t *testing.T) {
require.Nil(s.EnsureService(18, "node1",
require.Nil(t, s.EnsureService(18, "node1",
&structs.NodeService{
Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443,
},
))
require.False(watchFired(ws))
require.False(t, watchFired(ws))
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(err)
require.Equal(uint64(16), idx)
require.Len(results, 3)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
})
t.Run("do not associate connect-proxy services with gateway", func(t *testing.T) {
testRegisterSidecarProxy(t, s, 19, "node1", "web")
require.False(watchFired(ws))
require.False(t, watchFired(ws))
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(err)
require.Equal(uint64(16), idx)
require.Len(results, 3)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
})
t.Run("do not associate consul services with gateway", func(t *testing.T) {
require.Nil(s.EnsureService(20, "node1",
require.Nil(t, s.EnsureService(20, "node1",
&structs.NodeService{ID: "consul", Service: "consul", Tags: nil},
))
require.False(watchFired(ws))
require.False(t, watchFired(ws))
idx, results, err := s.GatewayServices(ws, "wildcardIngress", nil)
require.NoError(err)
require.Equal(uint64(16), idx)
require.Len(results, 3)
require.NoError(t, err)
require.Equal(t, uint64(16), idx)
require.Len(t, results, 3)
})
}
@ -5715,15 +5697,13 @@ func TestStateStore_GatewayServices_IngressProtocolFiltering(t *testing.T) {
})
t.Run("no services from default tcp protocol", func(t *testing.T) {
require := require.New(t)
idx, results, err := s.GatewayServices(nil, "ingress1", nil)
require.NoError(err)
require.Equal(uint64(4), idx)
require.Len(results, 0)
require.NoError(t, err)
require.Equal(t, uint64(4), idx)
require.Len(t, results, 0)
})
t.Run("service-defaults", func(t *testing.T) {
require := require.New(t)
expected := structs.GatewayServices{
{
Gateway: structs.NewServiceName("ingress1", nil),
@ -5746,13 +5726,12 @@ func TestStateStore_GatewayServices_IngressProtocolFiltering(t *testing.T) {
}
assert.NoError(t, s.EnsureConfigEntry(5, svcDefaults))
idx, results, err := s.GatewayServices(nil, "ingress1", nil)
require.NoError(err)
require.Equal(uint64(5), idx)
require.ElementsMatch(results, expected)
require.NoError(t, err)
require.Equal(t, uint64(5), idx)
require.ElementsMatch(t, results, expected)
})
t.Run("proxy-defaults", func(t *testing.T) {
require := require.New(t)
expected := structs.GatewayServices{
{
Gateway: structs.NewServiceName("ingress1", nil),
@ -5790,13 +5769,12 @@ func TestStateStore_GatewayServices_IngressProtocolFiltering(t *testing.T) {
assert.NoError(t, s.EnsureConfigEntry(6, proxyDefaults))
idx, results, err := s.GatewayServices(nil, "ingress1", nil)
require.NoError(err)
require.Equal(uint64(6), idx)
require.ElementsMatch(results, expected)
require.NoError(t, err)
require.Equal(t, uint64(6), idx)
require.ElementsMatch(t, results, expected)
})
t.Run("service-defaults overrides proxy-defaults", func(t *testing.T) {
require := require.New(t)
expected := structs.GatewayServices{
{
Gateway: structs.NewServiceName("ingress1", nil),
@ -5820,13 +5798,12 @@ func TestStateStore_GatewayServices_IngressProtocolFiltering(t *testing.T) {
assert.NoError(t, s.EnsureConfigEntry(7, svcDefaults))
idx, results, err := s.GatewayServices(nil, "ingress1", nil)
require.NoError(err)
require.Equal(uint64(7), idx)
require.ElementsMatch(results, expected)
require.NoError(t, err)
require.Equal(t, uint64(7), idx)
require.ElementsMatch(t, results, expected)
})
t.Run("change listener protocol and expect different filter", func(t *testing.T) {
require := require.New(t)
expected := structs.GatewayServices{
{
Gateway: structs.NewServiceName("ingress1", nil),
@ -5860,9 +5837,9 @@ func TestStateStore_GatewayServices_IngressProtocolFiltering(t *testing.T) {
assert.NoError(t, s.EnsureConfigEntry(8, ingress1))
idx, results, err := s.GatewayServices(nil, "ingress1", nil)
require.NoError(err)
require.Equal(uint64(8), idx)
require.ElementsMatch(results, expected)
require.NoError(t, err)
require.Equal(t, uint64(8), idx)
require.ElementsMatch(t, results, expected)
})
}
@ -7986,3 +7963,14 @@ func generateUUID() ([]byte, string) {
buf[10:16])
return buf, uuid
}
func setVirtualIPFlags(t *testing.T, s *Store) {
require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{
Key: structs.SystemMetadataVirtualIPsEnabled,
Value: "true",
}))
require.NoError(t, s.SystemMetadataSet(0, &structs.SystemMetadataEntry{
Key: structs.SystemMetadataTermGatewayVirtualIPsEnabled,
Value: "true",
}))
}
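The new `setVirtualIPFlags` helper collapses the pair of `SystemMetadataSet` calls that the virtual-IP tests above previously repeated inline. A hedged sketch of how it slots into a test; the proxy registration mirrors the redis-proxy case in `TestStateStore_EnsureService_VirtualIPAssign`, and everything else is assumed to come from this test package:

```go
func TestStateStore_VirtualIP_UsageSketch(t *testing.T) {
	s := testStateStore(t)
	setVirtualIPFlags(t, s) // enable both virtual-IP system metadata flags

	testRegisterNode(t, s, 1, "node1")

	// Registering a connect proxy for "foo" triggers a virtual IP assignment
	// under the destination service's name.
	require.NoError(t, s.EnsureService(2, "node1", &structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "foo-proxy",
		Service: "foo-proxy",
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "foo"},
		Port:    8000,
	}))

	// The first assignment comes from the start of the 240.0.0.0/4 range.
	vip, err := s.VirtualIPForService(structs.ServiceName{Name: "foo"})
	require.NoError(t, err)
	require.Equal(t, "240.0.0.1", vip)
}
```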

View File

@ -12,7 +12,6 @@ import (
)
func TestStore_ConfigEntry(t *testing.T) {
require := require.New(t)
s := testConfigStateStore(t)
expected := &structs.ProxyConfigEntry{
@ -24,12 +23,12 @@ func TestStore_ConfigEntry(t *testing.T) {
}
// Create
require.NoError(s.EnsureConfigEntry(0, expected))
require.NoError(t, s.EnsureConfigEntry(0, expected))
idx, config, err := s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.Equal(uint64(0), idx)
require.Equal(expected, config)
require.NoError(t, err)
require.Equal(t, uint64(0), idx)
require.Equal(t, expected, config)
// Update
updated := &structs.ProxyConfigEntry{
@ -39,44 +38,43 @@ func TestStore_ConfigEntry(t *testing.T) {
"DestinationServiceName": "bar",
},
}
require.NoError(s.EnsureConfigEntry(1, updated))
require.NoError(t, s.EnsureConfigEntry(1, updated))
idx, config, err = s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.Equal(uint64(1), idx)
require.Equal(updated, config)
require.NoError(t, err)
require.Equal(t, uint64(1), idx)
require.Equal(t, updated, config)
// Delete
require.NoError(s.DeleteConfigEntry(2, structs.ProxyDefaults, "global", nil))
require.NoError(t, s.DeleteConfigEntry(2, structs.ProxyDefaults, "global", nil))
idx, config, err = s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.Equal(uint64(2), idx)
require.Nil(config)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Nil(t, config)
// Set up a watch.
serviceConf := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
}
require.NoError(s.EnsureConfigEntry(3, serviceConf))
require.NoError(t, s.EnsureConfigEntry(3, serviceConf))
ws := memdb.NewWatchSet()
_, _, err = s.ConfigEntry(ws, structs.ServiceDefaults, "foo", nil)
require.NoError(err)
require.NoError(t, err)
// Make an unrelated modification and make sure the watch doesn't fire.
require.NoError(s.EnsureConfigEntry(4, updated))
require.False(watchFired(ws))
require.NoError(t, s.EnsureConfigEntry(4, updated))
require.False(t, watchFired(ws))
// Update the watched config and make sure it fires.
serviceConf.Protocol = "http"
require.NoError(s.EnsureConfigEntry(5, serviceConf))
require.True(watchFired(ws))
require.NoError(t, s.EnsureConfigEntry(5, serviceConf))
require.True(t, watchFired(ws))
}
func TestStore_ConfigEntryCAS(t *testing.T) {
require := require.New(t)
s := testConfigStateStore(t)
expected := &structs.ProxyConfigEntry{
@ -88,12 +86,12 @@ func TestStore_ConfigEntryCAS(t *testing.T) {
}
// Create
require.NoError(s.EnsureConfigEntry(1, expected))
require.NoError(t, s.EnsureConfigEntry(1, expected))
idx, config, err := s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.Equal(uint64(1), idx)
require.Equal(expected, config)
require.NoError(t, err)
require.Equal(t, uint64(1), idx)
require.Equal(t, expected, config)
// Update with invalid index
updated := &structs.ProxyConfigEntry{
@ -104,29 +102,28 @@ func TestStore_ConfigEntryCAS(t *testing.T) {
},
}
ok, err := s.EnsureConfigEntryCAS(2, 99, updated)
require.False(ok)
require.NoError(err)
require.False(t, ok)
require.NoError(t, err)
// Entry should not be changed
idx, config, err = s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.Equal(uint64(1), idx)
require.Equal(expected, config)
require.NoError(t, err)
require.Equal(t, uint64(1), idx)
require.Equal(t, expected, config)
// Update with a valid index
ok, err = s.EnsureConfigEntryCAS(2, 1, updated)
require.True(ok)
require.NoError(err)
require.True(t, ok)
require.NoError(t, err)
// Entry should be updated
idx, config, err = s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
require.NoError(err)
require.Equal(uint64(2), idx)
require.Equal(updated, config)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Equal(t, updated, config)
}
func TestStore_ConfigEntry_DeleteCAS(t *testing.T) {
require := require.New(t)
s := testConfigStateStore(t)
entry := &structs.ProxyConfigEntry{
@ -139,31 +136,31 @@ func TestStore_ConfigEntry_DeleteCAS(t *testing.T) {
// Attempt to delete the entry before it exists.
ok, err := s.DeleteConfigEntryCAS(1, 0, entry)
require.NoError(err)
require.False(ok)
require.NoError(t, err)
require.False(t, ok)
// Create the entry.
require.NoError(s.EnsureConfigEntry(1, entry))
require.NoError(t, s.EnsureConfigEntry(1, entry))
// Attempt to delete with an invalid index.
ok, err = s.DeleteConfigEntryCAS(2, 99, entry)
require.NoError(err)
require.False(ok)
require.NoError(t, err)
require.False(t, ok)
// Entry should not be deleted.
_, config, err := s.ConfigEntry(nil, entry.Kind, entry.Name, nil)
require.NoError(err)
require.NotNil(config)
require.NoError(t, err)
require.NotNil(t, config)
// Attempt to delete with a valid index.
ok, err = s.DeleteConfigEntryCAS(2, 1, entry)
require.NoError(err)
require.True(ok)
require.NoError(t, err)
require.True(t, ok)
// Entry should be deleted.
_, config, err = s.ConfigEntry(nil, entry.Kind, entry.Name, nil)
require.NoError(err)
require.Nil(config)
require.NoError(t, err)
require.Nil(t, config)
}
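The CAS tests above pin down the optimistic-concurrency contract: `EnsureConfigEntryCAS` and `DeleteConfigEntryCAS` only apply when the caller's index matches the entry's current index, and report `ok == false` otherwise. A sketch of the read-then-CAS retry loop those semantics imply; this is illustrative only, assumes this package's `Store` and imports, and reuses the fact that in these tests the index returned by `ConfigEntry` doubles as the CAS index for the global entry:

```go
// updateGlobalProxyDefaultsCAS retries a compare-and-set write of the global
// proxy-defaults entry, re-reading the current index whenever another writer
// wins the race. Not code from this commit.
func updateGlobalProxyDefaultsCAS(s *Store, raftIndex uint64, updated *structs.ProxyConfigEntry) (bool, error) {
	for attempt := 0; attempt < 3; attempt++ {
		// ConfigEntry returns the current index alongside the entry.
		idx, _, err := s.ConfigEntry(nil, structs.ProxyDefaults, "global", nil)
		if err != nil {
			return false, err
		}
		// The write applies only if the index still matches; ok reports
		// whether this attempt won the race.
		ok, err := s.EnsureConfigEntryCAS(raftIndex, idx, updated)
		if err != nil || ok {
			return ok, err
		}
		// Lost the race: loop, re-read the index, and try again.
	}
	return false, nil
}
```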
func TestStore_ConfigEntry_UpdateOver(t *testing.T) {
@ -263,7 +260,6 @@ func TestStore_ConfigEntry_UpdateOver(t *testing.T) {
}
func TestStore_ConfigEntries(t *testing.T) {
require := require.New(t)
s := testConfigStateStore(t)
// Create some config entries.
@ -280,39 +276,39 @@ func TestStore_ConfigEntries(t *testing.T) {
Name: "test3",
}
require.NoError(s.EnsureConfigEntry(0, entry1))
require.NoError(s.EnsureConfigEntry(1, entry2))
require.NoError(s.EnsureConfigEntry(2, entry3))
require.NoError(t, s.EnsureConfigEntry(0, entry1))
require.NoError(t, s.EnsureConfigEntry(1, entry2))
require.NoError(t, s.EnsureConfigEntry(2, entry3))
// Get all entries
idx, entries, err := s.ConfigEntries(nil, nil)
require.NoError(err)
require.Equal(uint64(2), idx)
require.Equal([]structs.ConfigEntry{entry1, entry2, entry3}, entries)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Equal(t, []structs.ConfigEntry{entry1, entry2, entry3}, entries)
// Get all proxy entries
idx, entries, err = s.ConfigEntriesByKind(nil, structs.ProxyDefaults, nil)
require.NoError(err)
require.Equal(uint64(2), idx)
require.Equal([]structs.ConfigEntry{entry1}, entries)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Equal(t, []structs.ConfigEntry{entry1}, entries)
// Get all service entries
ws := memdb.NewWatchSet()
idx, entries, err = s.ConfigEntriesByKind(ws, structs.ServiceDefaults, nil)
require.NoError(err)
require.Equal(uint64(2), idx)
require.Equal([]structs.ConfigEntry{entry2, entry3}, entries)
require.NoError(t, err)
require.Equal(t, uint64(2), idx)
require.Equal(t, []structs.ConfigEntry{entry2, entry3}, entries)
// Watch should not have fired
require.False(watchFired(ws))
require.False(t, watchFired(ws))
// Now make an update and make sure the watch fires.
require.NoError(s.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
require.NoError(t, s.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "test2",
Protocol: "tcp",
}))
require.True(watchFired(ws))
require.True(t, watchFired(ws))
}
func TestStore_ConfigEntry_GraphValidation(t *testing.T) {

View File

@ -184,25 +184,24 @@ func TestStore_CAConfig_Snapshot_Restore_BlankConfig(t *testing.T) {
}
func TestStore_CARootSetList(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Call list to populate the watch set
ws := memdb.NewWatchSet()
_, _, err := s.CARoots(ws)
assert.Nil(err)
assert.Nil(t, err)
// Build a valid value
ca1 := connect.TestCA(t, nil)
expected := *ca1
// Set
ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1})
assert.Nil(err)
assert.True(ok)
assert.Nil(t, err)
assert.True(t, ok)
// Make sure the index got updated.
assert.Equal(s.maxIndex(tableConnectCARoots), uint64(1))
assert.True(watchFired(ws), "watch fired")
assert.Equal(t, s.maxIndex(tableConnectCARoots), uint64(1))
assert.True(t, watchFired(ws), "watch fired")
// Read it back out and verify it.
@ -212,20 +211,19 @@ func TestStore_CARootSetList(t *testing.T) {
}
ws = memdb.NewWatchSet()
_, roots, err := s.CARoots(ws)
assert.Nil(err)
assert.Len(roots, 1)
assert.Nil(t, err)
assert.Len(t, roots, 1)
actual := roots[0]
assertDeepEqual(t, expected, *actual)
}
func TestStore_CARootSet_emptyID(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Call list to populate the watch set
ws := memdb.NewWatchSet()
_, _, err := s.CARoots(ws)
assert.Nil(err)
assert.Nil(t, err)
// Build a valid value
ca1 := connect.TestCA(t, nil)
@ -233,29 +231,28 @@ func TestStore_CARootSet_emptyID(t *testing.T) {
// Set
ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1})
assert.NotNil(err)
assert.Contains(err.Error(), ErrMissingCARootID.Error())
assert.False(ok)
assert.NotNil(t, err)
assert.Contains(t, err.Error(), ErrMissingCARootID.Error())
assert.False(t, ok)
// Make sure the index got updated.
assert.Equal(s.maxIndex(tableConnectCARoots), uint64(0))
assert.False(watchFired(ws), "watch fired")
assert.Equal(t, s.maxIndex(tableConnectCARoots), uint64(0))
assert.False(t, watchFired(ws), "watch fired")
// Read it back out and verify it.
ws = memdb.NewWatchSet()
_, roots, err := s.CARoots(ws)
assert.Nil(err)
assert.Len(roots, 0)
assert.Nil(t, err)
assert.Len(t, roots, 0)
}
func TestStore_CARootSet_noActive(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Call list to populate the watch set
ws := memdb.NewWatchSet()
_, _, err := s.CARoots(ws)
assert.Nil(err)
assert.Nil(t, err)
// Build a valid value
ca1 := connect.TestCA(t, nil)
@ -265,19 +262,18 @@ func TestStore_CARootSet_noActive(t *testing.T) {
// Set
ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2})
assert.NotNil(err)
assert.Contains(err.Error(), "exactly one active")
assert.False(ok)
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "exactly one active")
assert.False(t, ok)
}
func TestStore_CARootSet_multipleActive(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Call list to populate the watch set
ws := memdb.NewWatchSet()
_, _, err := s.CARoots(ws)
assert.Nil(err)
assert.Nil(t, err)
// Build a valid value
ca1 := connect.TestCA(t, nil)
@ -285,13 +281,12 @@ func TestStore_CARootSet_multipleActive(t *testing.T) {
// Set
ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2})
assert.NotNil(err)
assert.Contains(err.Error(), "exactly one active")
assert.False(ok)
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "exactly one active")
assert.False(t, ok)
}
func TestStore_CARootActive_valid(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Build a valid value
@ -303,33 +298,31 @@ func TestStore_CARootActive_valid(t *testing.T) {
// Set
ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2, ca3})
assert.Nil(err)
assert.True(ok)
assert.Nil(t, err)
assert.True(t, ok)
// Query
ws := memdb.NewWatchSet()
idx, res, err := s.CARootActive(ws)
assert.Equal(idx, uint64(1))
assert.Nil(err)
assert.NotNil(res)
assert.Equal(ca2.ID, res.ID)
assert.Equal(t, idx, uint64(1))
assert.Nil(t, err)
assert.NotNil(t, res)
assert.Equal(t, ca2.ID, res.ID)
}
// Test that querying the active CA returns the correct value.
func TestStore_CARootActive_none(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Querying with no results returns nil.
ws := memdb.NewWatchSet()
idx, res, err := s.CARootActive(ws)
assert.Equal(idx, uint64(0))
assert.Nil(res)
assert.Nil(err)
assert.Equal(t, idx, uint64(0))
assert.Nil(t, res)
assert.Nil(t, err)
}
func TestStore_CARoot_Snapshot_Restore(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Create some intentions.
@ -351,8 +344,8 @@ func TestStore_CARoot_Snapshot_Restore(t *testing.T) {
// Now create
ok, err := s.CARootSetCAS(1, 0, roots)
assert.Nil(err)
assert.True(ok)
assert.Nil(t, err)
assert.True(t, ok)
// Snapshot the queries.
snap := s.Snapshot()
@ -360,34 +353,33 @@ func TestStore_CARoot_Snapshot_Restore(t *testing.T) {
// Alter the real state store.
ok, err = s.CARootSetCAS(2, 1, roots[:1])
assert.Nil(err)
assert.True(ok)
assert.Nil(t, err)
assert.True(t, ok)
// Verify the snapshot.
assert.Equal(snap.LastIndex(), uint64(1))
assert.Equal(t, snap.LastIndex(), uint64(1))
dump, err := snap.CARoots()
assert.Nil(err)
assert.Equal(roots, dump)
assert.Nil(t, err)
assert.Equal(t, roots, dump)
// Restore the values into a new state store.
func() {
s := testStateStore(t)
restore := s.Restore()
for _, r := range dump {
assert.Nil(restore.CARoot(r))
assert.Nil(t, restore.CARoot(r))
}
restore.Commit()
// Read the restored values back out and verify that they match.
idx, actual, err := s.CARoots(nil)
assert.Nil(err)
assert.Equal(idx, uint64(2))
assert.Equal(roots, actual)
assert.Nil(t, err)
assert.Equal(t, idx, uint64(2))
assert.Equal(t, roots, actual)
}()
}
func TestStore_CABuiltinProvider(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
{
@ -398,13 +390,13 @@ func TestStore_CABuiltinProvider(t *testing.T) {
}
ok, err := s.CASetProviderState(0, expected)
assert.NoError(err)
assert.True(ok)
assert.NoError(t, err)
assert.True(t, ok)
idx, state, err := s.CAProviderState(expected.ID)
assert.NoError(err)
assert.Equal(idx, uint64(0))
assert.Equal(expected, state)
assert.NoError(t, err)
assert.Equal(t, idx, uint64(0))
assert.Equal(t, expected, state)
}
{
@ -415,13 +407,13 @@ func TestStore_CABuiltinProvider(t *testing.T) {
}
ok, err := s.CASetProviderState(1, expected)
assert.NoError(err)
assert.True(ok)
assert.NoError(t, err)
assert.True(t, ok)
idx, state, err := s.CAProviderState(expected.ID)
assert.NoError(err)
assert.Equal(idx, uint64(1))
assert.Equal(expected, state)
assert.NoError(t, err)
assert.Equal(t, idx, uint64(1))
assert.Equal(t, expected, state)
}
{
@ -429,21 +421,20 @@ func TestStore_CABuiltinProvider(t *testing.T) {
// numbers will initialize from the max index of the provider table.
// That's why this first serial is 2 and not 1.
sn, err := s.CAIncrementProviderSerialNumber(10)
assert.NoError(err)
assert.Equal(uint64(2), sn)
assert.NoError(t, err)
assert.Equal(t, uint64(2), sn)
sn, err = s.CAIncrementProviderSerialNumber(10)
assert.NoError(err)
assert.Equal(uint64(3), sn)
assert.NoError(t, err)
assert.Equal(t, uint64(3), sn)
sn, err = s.CAIncrementProviderSerialNumber(10)
assert.NoError(err)
assert.Equal(uint64(4), sn)
assert.NoError(t, err)
assert.Equal(t, uint64(4), sn)
}
}
func TestStore_CABuiltinProvider_Snapshot_Restore(t *testing.T) {
assert := assert.New(t)
s := testStateStore(t)
// Create multiple state entries.
@ -462,8 +453,8 @@ func TestStore_CABuiltinProvider_Snapshot_Restore(t *testing.T) {
for i, state := range before {
ok, err := s.CASetProviderState(uint64(98+i), state)
assert.NoError(err)
assert.True(ok)
assert.NoError(t, err)
assert.True(t, ok)
}
// Take a snapshot.
@ -477,26 +468,26 @@ func TestStore_CABuiltinProvider_Snapshot_Restore(t *testing.T) {
RootCert: "d",
}
ok, err := s.CASetProviderState(100, after)
assert.NoError(err)
assert.True(ok)
assert.NoError(t, err)
assert.True(t, ok)
snapped, err := snap.CAProviderState()
assert.NoError(err)
assert.Equal(before, snapped)
assert.NoError(t, err)
assert.Equal(t, before, snapped)
// Restore onto a new state store.
s2 := testStateStore(t)
restore := s2.Restore()
for _, entry := range snapped {
assert.NoError(restore.CAProviderState(entry))
assert.NoError(t, restore.CAProviderState(entry))
}
restore.Commit()
// Verify the restored values match those from before the snapshot.
for _, state := range before {
idx, res, err := s2.CAProviderState(state.ID)
assert.NoError(err)
assert.Equal(idx, uint64(99))
assert.Equal(state, res)
assert.NoError(t, err)
assert.Equal(t, idx, uint64(99))
assert.Equal(t, state, res)
}
}
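Most of the churn in these test files is mechanical: the commit drops testify's assertion-object pattern (`assert := assert.New(t)` / `require := require.New(t)`) in favor of passing `*testing.T` explicitly on every call, which keeps assertions correct when they move into `t.Run` subtests or helpers bound to a different `t`. A generic before/after sketch, not taken from this diff; `compute` is a hypothetical function and the standard testify imports are assumed:

```go
// Before: assertions bound to the outer *testing.T via assertion objects.
func TestSomething_Old(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	v, err := compute() // hypothetical function under test
	require.NoError(err)
	assert.Equal(42, v)
}

// After: every call names its *testing.T, so the same lines can be copied
// into t.Run subtests or test helpers without rebinding an assertion object.
func TestSomething_New(t *testing.T) {
	v, err := compute() // hypothetical function under test
	require.NoError(t, err)
	assert.Equal(t, 42, v)
}
```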

Some files were not shown because too many files have changed in this diff.