fixing merge conflicts part 3
This commit is contained in:
commit
667976c94f
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: reduce raft apply on CA configuration when no change is performed
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
server: partly fix config entry replication issue that prevents replication in some circumstances
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
server: suppress spurious blocking query returns where multiple config entries are involved
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Added Tags tab to gateways (just like exists for non-gateway services)
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:enhancement
|
||||
agent: Allow client agents to perform keyring operations
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: update Envoy supported version of 1.20 to 1.20.2
|
||||
```
|
|
@ -0,0 +1,7 @@
|
|||
```release-note:bug
|
||||
catalog: compare node names case insensitively in more places
|
||||
```
|
||||
|
||||
```release-note:bug
|
||||
state: fix bug blocking snapshot restore when a node check and node differed in casing of the node string
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: Update supported Envoy versions to include 1.19.3 and 1.18.6
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
ui: Move icons away from depending on a CSS preprocessor
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:enhancement
|
||||
acl: Provide fuller detail in the error message when an ACL denies access.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
ui: In the datacenter selector order Datacenters by Primary, Local then alphanumerically
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Fixes a visual bug where our loading icon can look cut off
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
raft: upgrade to v1.3.6 which fixes a bug where a read replica node could attempt bootstrapping raft and prevent other nodes from bootstrapping at all
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
server: fix spurious blocking query suppression for discovery chains
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
server: Ensure that service-defaults `Meta` is returned with the response to the `ConfigEntry.ResolveServiceConfig` RPC.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
server: **(Enterprise only)** Namespace deletion will now attempt to delete as many namespaced config entries as possible instead of halting on the first deletion that failed.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
acl: **(Enterprise Only)** fixes a bug preventing ACL policies configured with datacenter restrictions from being created if the cluster had been upgraded to Consul 1.11+ from an earlier version.
|
||||
```
|
|
@ -22,7 +22,7 @@ references:
|
|||
test-results: &TEST_RESULTS_DIR /tmp/test-results
|
||||
|
||||
cache:
|
||||
yarn: &YARN_CACHE_KEY consul-ui-v6-{{ checksum "ui/yarn.lock" }}
|
||||
yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}
|
||||
|
||||
environment: &ENVIRONMENT
|
||||
TEST_RESULTS_DIR: *TEST_RESULTS_DIR
|
||||
|
@ -889,20 +889,20 @@ jobs:
|
|||
path: *TEST_RESULTS_DIR
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_18_4:
|
||||
envoy-integration-test-1_18_6:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.18.4"
|
||||
ENVOY_VERSION: "1.18.6"
|
||||
|
||||
envoy-integration-test-1_19_1:
|
||||
envoy-integration-test-1_19_3:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.19.1"
|
||||
ENVOY_VERSION: "1.19.3"
|
||||
|
||||
envoy-integration-test-1_20_1:
|
||||
envoy-integration-test-1_20_2:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.20.1"
|
||||
ENVOY_VERSION: "1.20.2"
|
||||
|
||||
# run integration tests for the connect ca providers
|
||||
test-connect-ca-providers:
|
||||
|
@ -1148,13 +1148,13 @@ workflows:
|
|||
- envoy-integration-test-1_17_4:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_18_4:
|
||||
- envoy-integration-test-1_18_6:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_19_1:
|
||||
- envoy-integration-test-1_19_3:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_20_1:
|
||||
- envoy-integration-test-1_20_2:
|
||||
requires:
|
||||
- dev-build
|
||||
|
||||
|
|
|
@ -242,10 +242,6 @@ jobs:
|
|||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
with:
|
||||
platforms: all
|
||||
- name: Docker Build (Action)
|
||||
uses: hashicorp/actions-docker-build@v1
|
||||
with:
|
||||
|
|
|
@ -150,3 +150,81 @@ event "verify" {
|
|||
on = "fail"
|
||||
}
|
||||
}
|
||||
|
||||
## These are promotion and post-publish events
|
||||
## they should be added to the end of the file after the verify event stanza.
|
||||
|
||||
event "trigger-staging" {
|
||||
// This event is dispatched by the bob trigger-promotion command
|
||||
// and is required - do not delete.
|
||||
}
|
||||
|
||||
event "promote-staging" {
|
||||
depends = ["trigger-staging"]
|
||||
action "promote-staging" {
|
||||
organization = "hashicorp"
|
||||
repository = "crt-workflows-common"
|
||||
workflow = "promote-staging"
|
||||
}
|
||||
|
||||
notification {
|
||||
on = "always"
|
||||
}
|
||||
}
|
||||
|
||||
event "promote-staging-docker" {
|
||||
depends = ["promote-staging"]
|
||||
action "promote-staging-docker" {
|
||||
organization = "hashicorp"
|
||||
repository = "crt-workflows-common"
|
||||
workflow = "promote-staging-docker"
|
||||
}
|
||||
|
||||
notification {
|
||||
on = "always"
|
||||
}
|
||||
}
|
||||
|
||||
event "trigger-production" {
|
||||
// This event is dispatched by the bob trigger-promotion command
|
||||
// and is required - do not delete.
|
||||
}
|
||||
|
||||
event "promote-production" {
|
||||
depends = ["trigger-production"]
|
||||
action "promote-production" {
|
||||
organization = "hashicorp"
|
||||
repository = "crt-workflows-common"
|
||||
workflow = "promote-production"
|
||||
}
|
||||
|
||||
notification {
|
||||
on = "always"
|
||||
}
|
||||
}
|
||||
|
||||
event "promote-production-docker" {
|
||||
depends = ["promote-production"]
|
||||
action "promote-production-docker" {
|
||||
organization = "hashicorp"
|
||||
repository = "crt-workflows-common"
|
||||
workflow = "promote-production-docker"
|
||||
}
|
||||
|
||||
notification {
|
||||
on = "always"
|
||||
}
|
||||
}
|
||||
|
||||
event "promote-production-packaging" {
|
||||
depends = ["promote-production-docker"]
|
||||
action "promote-production-packaging" {
|
||||
organization = "hashicorp"
|
||||
repository = "crt-workflows-common"
|
||||
workflow = "promote-production-packaging"
|
||||
}
|
||||
|
||||
notification {
|
||||
on = "always"
|
||||
}
|
||||
}
|
83
CHANGELOG.md
83
CHANGELOG.md
|
@ -1,3 +1,36 @@
|
|||
## 1.11.4 (February 28, 2022)
|
||||
|
||||
FEATURES:
|
||||
|
||||
* ca: support using an external root CA with the vault CA provider [[GH-11910](https://github.com/hashicorp/consul/issues/11910)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* connect: Update supported Envoy versions to include 1.19.3 and 1.18.6 [[GH-12449](https://github.com/hashicorp/consul/issues/12449)]
|
||||
* connect: update Envoy supported version of 1.20 to 1.20.2 [[GH-12433](https://github.com/hashicorp/consul/issues/12433)]
|
||||
* connect: update Envoy supported version of 1.20 to 1.20.2 [[GH-12443](https://github.com/hashicorp/consul/issues/12443)]
|
||||
* debug: reduce the capture time for trace to only a single interval instead of the full duration to make trace.out easier to open without running into OOM errors. [[GH-12359](https://github.com/hashicorp/consul/issues/12359)]
|
||||
* raft: add additional logging of snapshot restore progress [[GH-12325](https://github.com/hashicorp/consul/issues/12325)]
|
||||
* rpc: improve blocking queries for items that do not exist, by continuing to block until they exist (or the timeout). [[GH-12110](https://github.com/hashicorp/consul/issues/12110)]
|
||||
* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids
|
||||
* server: conditionally avoid writing a config entry to raft if it was already the same [[GH-12321](https://github.com/hashicorp/consul/issues/12321)]
|
||||
* server: suppress spurious blocking query returns where multiple config entries are involved [[GH-12362](https://github.com/hashicorp/consul/issues/12362)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* agent: Parse datacenter from Create/Delete requests for AuthMethods and BindingRules. [[GH-12370](https://github.com/hashicorp/consul/issues/12370)]
|
||||
* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)]
|
||||
* catalog: compare node names case insensitively in more places [[GH-12444](https://github.com/hashicorp/consul/issues/12444)]
|
||||
* checks: populate interval and timeout when registering services [[GH-11138](https://github.com/hashicorp/consul/issues/11138)]
|
||||
* local: fixes a data race in anti-entropy sync that could cause the wrong tags to be applied to a service when EnableTagOverride is used [[GH-12324](https://github.com/hashicorp/consul/issues/12324)]
|
||||
* raft: fixed a race condition in leadership transfer that could result in reelection of the current leader [[GH-12325](https://github.com/hashicorp/consul/issues/12325)]
|
||||
* server: **(Enterprise only)** Namespace deletion will now attempt to delete as many namespaced config entries as possible instead of halting on the first deletion that failed.
|
||||
* server: partly fix config entry replication issue that prevents replication in some circumstances [[GH-12307](https://github.com/hashicorp/consul/issues/12307)]
|
||||
* state: fix bug blocking snapshot restore when a node check and node differed in casing of the node string [[GH-12444](https://github.com/hashicorp/consul/issues/12444)]
|
||||
* ui: Ensure we always display the Policy default preview in the Namespace editing form [[GH-12316](https://github.com/hashicorp/consul/issues/12316)]
|
||||
* ui: Fix missing helper javascript error [[GH-12358](https://github.com/hashicorp/consul/issues/12358)]
|
||||
* xds: Fixed Envoy http features such as outlier detection and retry policy not working correctly with transparent proxy. [[GH-12385](https://github.com/hashicorp/consul/issues/12385)]
|
||||
|
||||
## 1.11.3 (February 11, 2022)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
@ -206,6 +239,37 @@ NOTES:
|
|||
|
||||
* Renamed the `agent_master` field to `agent_recovery` in the `acl-tokens.json` file in which tokens are persisted on-disk (when `acl.enable_token_persistence` is enabled) [[GH-11744](https://github.com/hashicorp/consul/issues/11744)]
|
||||
|
||||
## 1.10.9 (February 28, 2022)
|
||||
|
||||
SECURITY:
|
||||
|
||||
* agent: Use SHA256 instead of MD5 to generate persistence file names.
|
||||
|
||||
FEATURES:
|
||||
|
||||
* ca: support using an external root CA with the vault CA provider [[GH-11910](https://github.com/hashicorp/consul/issues/11910)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* connect: Update supported Envoy versions to include 1.18.6 [[GH-12450](https://github.com/hashicorp/consul/issues/12450)]
|
||||
* connect: update Envoy supported version of 1.20 to 1.20.2 [[GH-12434](https://github.com/hashicorp/consul/issues/12434)]
|
||||
* debug: reduce the capture time for trace to only a single interval instead of the full duration to make trace.out easier to open without running into OOM errors. [[GH-12359](https://github.com/hashicorp/consul/issues/12359)]
|
||||
* raft: add additional logging of snapshot restore progress [[GH-12325](https://github.com/hashicorp/consul/issues/12325)]
|
||||
* rpc: improve blocking queries for items that do not exist, by continuing to block until they exist (or the timeout). [[GH-12110](https://github.com/hashicorp/consul/issues/12110)]
|
||||
* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids
|
||||
* server: conditionally avoid writing a config entry to raft if it was already the same [[GH-12321](https://github.com/hashicorp/consul/issues/12321)]
|
||||
* server: suppress spurious blocking query returns where multiple config entries are involved [[GH-12362](https://github.com/hashicorp/consul/issues/12362)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* agent: Parse datacenter from Create/Delete requests for AuthMethods and BindingRules. [[GH-12370](https://github.com/hashicorp/consul/issues/12370)]
|
||||
* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)]
|
||||
* raft: fixed a race condition in leadership transfer that could result in reelection of the current leader [[GH-12325](https://github.com/hashicorp/consul/issues/12325)]
|
||||
* server: **(Enterprise only)** Namespace deletion will now attempt to delete as many namespaced config entries as possible instead of halting on the first deletion that failed.
|
||||
* server: partly fix config entry replication issue that prevents replication in some circumstances [[GH-12307](https://github.com/hashicorp/consul/issues/12307)]
|
||||
* ui: Ensure we always display the Policy default preview in the Namespace editing form [[GH-12316](https://github.com/hashicorp/consul/issues/12316)]
|
||||
* xds: Fixed Envoy http features such as outlier detection and retry policy not working correctly with transparent proxy. [[GH-12385](https://github.com/hashicorp/consul/issues/12385)]
|
||||
|
||||
## 1.10.8 (February 11, 2022)
|
||||
|
||||
SECURITY:
|
||||
|
@ -554,6 +618,25 @@ NOTES:
|
|||
|
||||
* legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive
|
||||
|
||||
## 1.9.16 (February 28, 2022)
|
||||
|
||||
FEATURES:
|
||||
|
||||
* ca: support using an external root CA with the vault CA provider [[GH-11910](https://github.com/hashicorp/consul/issues/11910)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids
|
||||
* server: conditionally avoid writing a config entry to raft if it was already the same [[GH-12321](https://github.com/hashicorp/consul/issues/12321)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* agent: Parse datacenter from Create/Delete requests for AuthMethods and BindingRules. [[GH-12370](https://github.com/hashicorp/consul/issues/12370)]
|
||||
* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)]
|
||||
* server: **(Enterprise only)** Namespace deletion will now attempt to delete as many namespaced config entries as possible instead of halting on the first deletion that failed.
|
||||
* server: partly fix config entry replication issue that prevents replication in some circumstances [[GH-12307](https://github.com/hashicorp/consul/issues/12307)]
|
||||
* ui: Ensure we always display the Policy default preview in the Namespace editing form [[GH-12316](https://github.com/hashicorp/consul/issues/12316)]
|
||||
|
||||
## 1.9.15 (February 11, 2022)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
|
12
GNUmakefile
12
GNUmakefile
|
@ -178,8 +178,8 @@ endif
|
|||
|
||||
# linux builds a linux binary independent of the source platform
|
||||
linux:
|
||||
mkdir -p bin
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
|
||||
@mkdir -p ./pkg/bin/linux_amd64
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
|
||||
|
||||
# dist builds binaries for all platforms and packages them for distribution
|
||||
dist:
|
||||
|
@ -330,15 +330,15 @@ ifeq ("$(CIRCLECI)","true")
|
|||
# Run in CI
|
||||
gotestsum --format=short-verbose --junitfile "$(TEST_RESULTS_DIR)/gotestsum-report.xml" -- -cover -coverprofile=coverage.txt ./agent/connect/ca
|
||||
# Run leader tests that require Vault
|
||||
gotestsum --format=short-verbose --junitfile "$(TEST_RESULTS_DIR)/gotestsum-report-leader.xml" -- -cover -coverprofile=coverage-leader.txt -run '.*_Vault_' ./agent/consul
|
||||
gotestsum --format=short-verbose --junitfile "$(TEST_RESULTS_DIR)/gotestsum-report-leader.xml" -- -cover -coverprofile=coverage-leader.txt -run Vault ./agent/consul
|
||||
# Run agent tests that require Vault
|
||||
gotestsum --format=short-verbose --junitfile "$(TEST_RESULTS_DIR)/gotestsum-report-agent.xml" -- -cover -coverprofile=coverage-agent.txt -run '.*_Vault_' ./agent
|
||||
gotestsum --format=short-verbose --junitfile "$(TEST_RESULTS_DIR)/gotestsum-report-agent.xml" -- -cover -coverprofile=coverage-agent.txt -run Vault ./agent
|
||||
else
|
||||
# Run locally
|
||||
@echo "Running /agent/connect/ca tests in verbose mode"
|
||||
@go test -v ./agent/connect/ca
|
||||
@go test -v ./agent/consul -run '.*_Vault_'
|
||||
@go test -v ./agent -run '.*_Vault_'
|
||||
@go test -v ./agent/consul -run Vault
|
||||
@go test -v ./agent -run Vault
|
||||
endif
|
||||
|
||||
proto: $(PROTOGOFILES) $(PROTOGOBINFILES)
|
||||
|
|
|
@ -161,6 +161,279 @@ type Authorizer interface {
|
|||
|
||||
// Embedded Interface for Consul Enterprise specific ACL enforcement
|
||||
enterpriseAuthorizer
|
||||
|
||||
// ToAllowAuthorizer is needed until we can use ResolveResult in all the places this interface is used.
|
||||
ToAllowAuthorizer() AllowAuthorizer
|
||||
}
|
||||
|
||||
// AllowAuthorizer is a wrapper to expose the *Allowed methods.
|
||||
// This and the ToAllowAuthorizer function exist to tide us over until the ResolveResult struct
|
||||
// is moved into acl.
|
||||
type AllowAuthorizer struct {
|
||||
Authorizer
|
||||
}
|
||||
|
||||
// ACLReadAllowed checks for permission to list all the ACLs
|
||||
func (a AllowAuthorizer) ACLReadAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.ACLRead(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceACL, AccessRead)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ACLWriteAllowed checks for permission to manipulate ACLs
|
||||
func (a AllowAuthorizer) ACLWriteAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.ACLWrite(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceACL, AccessWrite)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AgentReadAllowed checks for permission to read from agent endpoints for a
|
||||
// given node.
|
||||
func (a AllowAuthorizer) AgentReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.AgentRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceAgent, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AgentWriteAllowed checks for permission to make changes via agent endpoints
|
||||
// for a given node.
|
||||
func (a AllowAuthorizer) AgentWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.AgentWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceAgent, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EventReadAllowed determines if a specific event can be queried.
|
||||
func (a AllowAuthorizer) EventReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.EventRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceEvent, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EventWriteAllowed determines if a specific event may be fired.
|
||||
func (a AllowAuthorizer) EventWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.EventWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceEvent, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IntentionDefaultAllowAllowed determines the default authorized behavior
|
||||
// when no intentions match a Connect request.
|
||||
func (a AllowAuthorizer) IntentionDefaultAllowAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IntentionDefaultAllow(ctx) != Allow {
|
||||
// This is a bit nuanced, in that this isn't set by a rule, but inherited globally
|
||||
// TODO(acl-error-enhancements) revisit when we have full accessor info
|
||||
return PermissionDeniedError{Cause: "Denied by intention default"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IntentionReadAllowed determines if a specific intention can be read.
|
||||
func (a AllowAuthorizer) IntentionReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IntentionRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceIntention, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IntentionWriteAllowed determines if a specific intention can be
|
||||
// created, modified, or deleted.
|
||||
func (a AllowAuthorizer) IntentionWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IntentionWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceIntention, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyListAllowed checks for permission to list keys under a prefix
|
||||
func (a AllowAuthorizer) KeyListAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.KeyList(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceKey, AccessList, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyReadAllowed checks for permission to read a given key
|
||||
func (a AllowAuthorizer) KeyReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.KeyRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceKey, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyWriteAllowed checks for permission to write a given key
|
||||
func (a AllowAuthorizer) KeyWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.KeyWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceKey, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyWritePrefixAllowed checks for permission to write to an
|
||||
// entire key prefix. This means there must be no sub-policies
|
||||
// that deny a write.
|
||||
func (a AllowAuthorizer) KeyWritePrefixAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.KeyWritePrefix(name, ctx) != Allow {
|
||||
// TODO(acl-error-enhancements) revisit this message; we may need to do some extra plumbing inside of KeyWritePrefix to
|
||||
// return properly detailed information.
|
||||
return PermissionDeniedByACL(a, ctx, ResourceKey, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyringReadAllowed determines if the encryption keyring used in
|
||||
// the gossip layer can be read.
|
||||
func (a AllowAuthorizer) KeyringReadAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.KeyringRead(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceKeyring, AccessRead)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyringWriteAllowed determines if the keyring can be manipulated
|
||||
func (a AllowAuthorizer) KeyringWriteAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.KeyringWrite(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceKeyring, AccessWrite)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MeshReadAllowed determines if the read-only Consul mesh functions
|
||||
// can be used.
|
||||
func (a AllowAuthorizer) MeshReadAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.MeshRead(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceMesh, AccessRead)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MeshWriteAllowed determines if the state-changing Consul mesh
|
||||
// functions can be used.
|
||||
func (a AllowAuthorizer) MeshWriteAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.MeshWrite(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceMesh, AccessWrite)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeReadAllowed checks for permission to read (discover) a given node.
|
||||
func (a AllowAuthorizer) NodeReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.NodeRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceNode, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeReadAllAllowed checks for permission to read (discover) all nodes.
|
||||
func (a AllowAuthorizer) NodeReadAllAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.NodeReadAll(ctx) != Allow {
|
||||
// This is only used to gate certain UI functions right now (e.g metrics)
|
||||
return PermissionDeniedByACL(a, ctx, ResourceNode, AccessRead, "all nodes")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeWriteAllowed checks for permission to create or update (register) a
|
||||
// given node.
|
||||
func (a AllowAuthorizer) NodeWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.NodeWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceNode, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OperatorReadAllowed determines if the read-only Consul operator functions
|
||||
// can be used.
|
||||
func (a AllowAuthorizer) OperatorReadAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.OperatorRead(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceOperator, AccessRead)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OperatorWriteAllowed determines if the state-changing Consul operator
|
||||
// functions can be used.
|
||||
func (a AllowAuthorizer) OperatorWriteAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.OperatorWrite(ctx) != Allow {
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceOperator, AccessWrite)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PreparedQueryReadAllowed determines if a specific prepared query can be read
|
||||
// to show its contents (this is not used for execution).
|
||||
func (a AllowAuthorizer) PreparedQueryReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.PreparedQueryRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceQuery, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PreparedQueryWriteAllowed determines if a specific prepared query can be
|
||||
// created, modified, or deleted.
|
||||
func (a AllowAuthorizer) PreparedQueryWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.PreparedQueryWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceQuery, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServiceReadAllowed checks for permission to read a given service
|
||||
func (a AllowAuthorizer) ServiceReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.ServiceRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceService, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServiceReadAllAllowed checks for permission to read all services
|
||||
func (a AllowAuthorizer) ServiceReadAllAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.ServiceReadAll(ctx) != Allow {
|
||||
// This is only used to gate certain UI functions right now (e.g metrics)
|
||||
return PermissionDeniedByACL(a, ctx, ResourceService, AccessRead, "all services") // read
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServiceWriteAllowed checks for permission to create or update a given
|
||||
// service
|
||||
func (a AllowAuthorizer) ServiceWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.ServiceWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceService, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SessionReadAllowed checks for permission to read sessions for a given node.
|
||||
func (a AllowAuthorizer) SessionReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.SessionRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceSession, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SessionWriteAllowed checks for permission to create sessions for a given
|
||||
// node.
|
||||
func (a AllowAuthorizer) SessionWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.SessionWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceSession, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SnapshotAllowed checks for permission to take and restore snapshots.
|
||||
func (a AllowAuthorizer) SnapshotAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.Snapshot(ctx) != Allow {
|
||||
// Implementation of this currently just checks acl write
|
||||
return PermissionDeniedByACLUnnamed(a, ctx, ResourceACL, AccessWrite)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx *AuthorizerContext) (EnforcementDecision, error) {
|
||||
|
|
|
@ -204,6 +204,10 @@ func (m *mockAuthorizer) Snapshot(ctx *AuthorizerContext) EnforcementDecision {
|
|||
return ret.Get(0).(EnforcementDecision)
|
||||
}
|
||||
|
||||
func (p *mockAuthorizer) ToAllowAuthorizer() AllowAuthorizer {
|
||||
return AllowAuthorizer{Authorizer: p}
|
||||
}
|
||||
|
||||
func TestACL_Enforce(t *testing.T) {
|
||||
type testCase struct {
|
||||
method string
|
||||
|
|
|
@ -256,3 +256,7 @@ func (c *ChainedAuthorizer) Snapshot(entCtx *AuthorizerContext) EnforcementDecis
|
|||
return authz.Snapshot(entCtx)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ChainedAuthorizer) ToAllowAuthorizer() AllowAuthorizer {
|
||||
return AllowAuthorizer{Authorizer: c}
|
||||
}
|
||||
|
|
|
@ -99,6 +99,10 @@ func (authz testAuthorizer) Snapshot(*AuthorizerContext) EnforcementDecision {
|
|||
return EnforcementDecision(authz)
|
||||
}
|
||||
|
||||
func (authz testAuthorizer) ToAllowAuthorizer() AllowAuthorizer {
|
||||
return AllowAuthorizer{Authorizer: &authz}
|
||||
}
|
||||
|
||||
func TestChainedAuthorizer(t *testing.T) {
|
||||
t.Run("No Authorizers", func(t *testing.T) {
|
||||
authz := NewChainedAuthorizer([]Authorizer{})
|
||||
|
|
|
@ -106,7 +106,7 @@ func (e PermissionDeniedError) Error() string {
|
|||
fmt.Fprintf(&message, " lacks permission '%s:%s'", e.Resource, e.AccessLevel.String())
|
||||
|
||||
if e.ResourceID.Name != "" {
|
||||
fmt.Fprintf(&message, " %s", e.ResourceID.ToString())
|
||||
fmt.Fprintf(&message, " on %s", e.ResourceID.ToString())
|
||||
}
|
||||
return message.String()
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ func TestPermissionDeniedError(t *testing.T) {
|
|||
},
|
||||
{
|
||||
err: PermissionDeniedByACL(&auth1, nil, ResourceService, AccessRead, "foobar"),
|
||||
expected: "Permission denied: provided accessor lacks permission 'service:read' foobar",
|
||||
expected: "Permission denied: provided accessor lacks permission 'service:read' on foobar",
|
||||
},
|
||||
{
|
||||
err: PermissionDeniedByACLUnnamed(&auth1, nil, ResourceService, AccessRead),
|
||||
|
|
|
@ -787,3 +787,7 @@ func (p *policyAuthorizer) SessionWrite(node string, _ *AuthorizerContext) Enfor
|
|||
}
|
||||
return Default
|
||||
}
|
||||
|
||||
func (p *policyAuthorizer) ToAllowAuthorizer() AllowAuthorizer {
|
||||
return AllowAuthorizer{Authorizer: p}
|
||||
}
|
||||
|
|
|
@ -240,6 +240,10 @@ func (s *staticAuthorizer) Snapshot(_ *AuthorizerContext) EnforcementDecision {
|
|||
return Deny
|
||||
}
|
||||
|
||||
func (s *staticAuthorizer) ToAllowAuthorizer() AllowAuthorizer {
|
||||
return AllowAuthorizer{Authorizer: s}
|
||||
}
|
||||
|
||||
// AllowAll returns an Authorizer that allows all operations
|
||||
func AllowAll() Authorizer {
|
||||
return allowAll
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"regexp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func RequirePermissionDeniedError(t testing.TB, err error, _ Authorizer, _ *AuthorizerContext, resource Resource, accessLevel AccessLevel, resourceID string) {
|
||||
t.Helper()
|
||||
if err == nil {
|
||||
t.Fatal("An error is expected but got nil.")
|
||||
}
|
||||
if v, ok := err.(PermissionDeniedError); ok {
|
||||
require.Equal(t, v.Resource, resource)
|
||||
require.Equal(t, v.AccessLevel, accessLevel)
|
||||
require.Equal(t, v.ResourceID.Name, resourceID)
|
||||
} else {
|
||||
t.Fatalf("Expected a permission denied error got %T %vp", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
func RequirePermissionDeniedMessage(t testing.TB, msg string, auth Authorizer, _ *AuthorizerContext, resource Resource, accessLevel AccessLevel, resourceID string) {
|
||||
require.NotEmpty(t, msg, "expected non-empty error message")
|
||||
|
||||
var resourceIDFound string
|
||||
if auth == nil {
|
||||
expr := "^Permission denied" + `: provided accessor lacks permission '(\S*):(\S*)' on (.*)\s*$`
|
||||
re, _ := regexp.Compile(expr)
|
||||
matched := re.FindStringSubmatch(msg)
|
||||
|
||||
require.Equal(t, string(resource), matched[1], "resource")
|
||||
require.Equal(t, accessLevel.String(), matched[2], "access level")
|
||||
resourceIDFound = matched[3]
|
||||
} else {
|
||||
expr := "^Permission denied" + `: accessor '(\S*)' lacks permission '(\S*):(\S*)' on (.*)\s*$`
|
||||
re, _ := regexp.Compile(expr)
|
||||
matched := re.FindStringSubmatch(msg)
|
||||
|
||||
require.Equal(t, auth, matched[1], "auth")
|
||||
require.Equal(t, string(resource), matched[2], "resource")
|
||||
require.Equal(t, accessLevel.String(), matched[3], "access level")
|
||||
resourceIDFound = matched[4]
|
||||
}
|
||||
// AuthorizerContext information should be checked here
|
||||
require.Contains(t, resourceIDFound, resourceID, "resource id")
|
||||
}
|
41
agent/acl.go
41
agent/acl.go
|
@ -43,15 +43,15 @@ func (a *Agent) vetServiceRegisterWithAuthorizer(authz acl.Authorizer, service *
|
|||
|
||||
// Vet the service itself.
|
||||
service.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, service.Service)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(service.Service, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Vet any service that might be getting overwritten.
|
||||
if existing := a.State.Service(service.CompoundServiceID()); existing != nil {
|
||||
existing.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(existing.Service, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, existing.Service)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(existing.Service, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -59,8 +59,8 @@ func (a *Agent) vetServiceRegisterWithAuthorizer(authz acl.Authorizer, service *
|
|||
// since it can be discovered as an instance of that service.
|
||||
if service.Kind == structs.ServiceKindConnectProxy {
|
||||
service.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, service.Proxy.DestinationServiceName)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(service.Proxy.DestinationServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -73,8 +73,9 @@ func (a *Agent) vetServiceUpdateWithAuthorizer(authz acl.Authorizer, serviceID s
|
|||
// Vet any changes based on the existing services's info.
|
||||
if existing := a.State.Service(serviceID); existing != nil {
|
||||
existing.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(existing.Service, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, existing.Service)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(existing.Service, &authzContext); err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
} else {
|
||||
// Take care if modifying this error message.
|
||||
|
@ -95,13 +96,13 @@ func (a *Agent) vetCheckRegisterWithAuthorizer(authz acl.Authorizer, check *stru
|
|||
|
||||
// Vet the check itself.
|
||||
if len(check.ServiceName) > 0 {
|
||||
if authz.ServiceWrite(check.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, check.ServiceName)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(check.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// N.B. Should this authzContext be derived from a.AgentEnterpriseMeta()
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceNode, acl.AccessWrite, a.config.NodeName)
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(a.config.NodeName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -109,13 +110,13 @@ func (a *Agent) vetCheckRegisterWithAuthorizer(authz acl.Authorizer, check *stru
|
|||
if existing := a.State.Check(check.CompoundCheckID()); existing != nil {
|
||||
if len(existing.ServiceName) > 0 {
|
||||
// N.B. Should this authzContext be derived from existing.EnterpriseMeta?
|
||||
if authz.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, existing.ServiceName)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(existing.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// N.B. Should this authzContext be derived from a.AgentEnterpriseMeta()
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceNode, acl.AccessWrite, a.config.NodeName)
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(a.config.NodeName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -130,12 +131,12 @@ func (a *Agent) vetCheckUpdateWithAuthorizer(authz acl.Authorizer, checkID struc
|
|||
// Vet any changes based on the existing check's info.
|
||||
if existing := a.State.Check(checkID); existing != nil {
|
||||
if len(existing.ServiceName) > 0 {
|
||||
if authz.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessWrite, existing.ServiceName)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(existing.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceNode, acl.AccessWrite, a.config.NodeName)
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(a.config.NodeName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -707,6 +707,7 @@ func (a *Agent) listenAndServeGRPC() error {
|
|||
|
||||
xdsServer := xds.NewServer(
|
||||
a.logger.Named(logging.Envoy),
|
||||
a.config.ConnectServerlessPluginEnabled,
|
||||
a.proxyConfig,
|
||||
func(id string) (acl.Authorizer, error) {
|
||||
return a.delegate.ResolveTokenAndDefaultMeta(id, nil, nil)
|
||||
|
|
|
@ -60,8 +60,8 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentRead(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentReadAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cs lib.CoordinateSet
|
||||
|
@ -150,8 +150,8 @@ func (s *HTTPHandlers) AgentMetrics(resp http.ResponseWriter, req *http.Request)
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentRead(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentReadAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if enablePrometheusOutput(req) {
|
||||
if s.agent.config.Telemetry.PrometheusOpts.Expiration < 1 {
|
||||
|
@ -187,8 +187,8 @@ func (s *HTTPHandlers) AgentMetricsStream(resp http.ResponseWriter, req *http.Re
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentRead(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentReadAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
flusher, ok := resp.(http.Flusher)
|
||||
|
@ -240,8 +240,8 @@ func (s *HTTPHandlers) AgentReload(resp http.ResponseWriter, req *http.Request)
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentWrite(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentWriteAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, s.agent.ReloadConfig()
|
||||
|
@ -440,8 +440,8 @@ func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
var authzContext acl.AuthorizerContext
|
||||
svc.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceRead(svc.Service, &authzContext) != acl.Allow {
|
||||
return "", nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(svc.Service, &authzContext); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// Calculate the content hash over the response, minus the hash field
|
||||
|
@ -621,8 +621,9 @@ func (s *HTTPHandlers) AgentJoin(resp http.ResponseWriter, req *http.Request) (i
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentWrite(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceAgent, acl.AccessWrite, s.agent.config.NodeName)
|
||||
|
||||
if err := authz.ToAllowAuthorizer().AgentWriteAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the request partition and default to that of the agent.
|
||||
|
@ -666,8 +667,8 @@ func (s *HTTPHandlers) AgentLeave(resp http.ResponseWriter, req *http.Request) (
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentWrite(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentWriteAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := s.agent.Leave(); err != nil {
|
||||
|
@ -685,8 +686,8 @@ func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Reque
|
|||
return nil, err
|
||||
}
|
||||
// TODO(partitions): should this be possible in a partition?
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return nil, acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessWrite)
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the request partition and default to that of the agent.
|
||||
|
@ -1007,8 +1008,8 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
|
|||
dc := s.agent.config.Datacenter
|
||||
|
||||
if service := s.agent.State.Service(sid); service != nil {
|
||||
if authz.ServiceRead(service.Service, &authzContext) != acl.Allow {
|
||||
return nil, acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessRead, service.Service)
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(service.Service, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
code, status, healthChecks := agentHealthService(sid, s)
|
||||
if returnTextPlain(req) {
|
||||
|
@ -1060,8 +1061,8 @@ func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *h
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if authz.ServiceRead(serviceName, &authzContext) != acl.Allow {
|
||||
return nil, acl.PermissionDeniedByACL(authz, &authzContext, acl.ResourceService, acl.AccessRead, serviceName)
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(serviceName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !s.validateRequestPartition(resp, &entMeta) {
|
||||
|
@ -1374,8 +1375,8 @@ func (s *HTTPHandlers) AgentNodeMaintenance(resp http.ResponseWriter, req *http.
|
|||
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.NodeWrite(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if enable {
|
||||
|
@ -1399,8 +1400,8 @@ func (s *HTTPHandlers) AgentMonitor(resp http.ResponseWriter, req *http.Request)
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentRead(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentReadAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the provided loglevel.
|
||||
|
@ -1482,8 +1483,8 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) (
|
|||
// Authorize using the agent's own enterprise meta, not the token.
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.AgentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.AgentWrite(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().AgentWriteAllowed(s.agent.config.NodeName, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The body is just the token, but it's in a JSON object so we can add
|
||||
|
@ -1683,8 +1684,8 @@ func (s *HTTPHandlers) AgentHost(resp http.ResponseWriter, req *http.Request) (i
|
|||
}
|
||||
|
||||
// TODO(partitions): should this be possible in a partition?
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return nil, acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessRead)
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return debug.CollectHostInfo(), nil
|
||||
|
|
|
@ -2,19 +2,21 @@ package agent
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/api"
|
||||
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestCatalogRegister_Service_InvalidAddress(t *testing.T) {
|
||||
|
@ -412,42 +414,28 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
}
|
||||
var out struct{}
|
||||
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
args = &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "bar",
|
||||
Address: "127.0.0.2",
|
||||
}
|
||||
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
// Nobody has coordinates set so this will still return them in the
|
||||
// order they are indexed.
|
||||
req, _ := http.NewRequest("GET", "/v1/catalog/nodes?dc=dc1&near=foo", nil)
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.CatalogNodes(resp, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
assertIndex(t, resp)
|
||||
nodes := obj.(structs.Nodes)
|
||||
if len(nodes) != 3 {
|
||||
t.Fatalf("bad: %v", obj)
|
||||
}
|
||||
if nodes[0].Node != "bar" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[1].Node != "foo" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[2].Node != a.Config.NodeName {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
require.Len(t, nodes, 3)
|
||||
require.Equal(t, "bar", nodes[0].Node)
|
||||
require.Equal(t, "foo", nodes[1].Node)
|
||||
require.Equal(t, a.Config.NodeName, nodes[2].Node)
|
||||
|
||||
// Send an update for the node and wait for it to get applied.
|
||||
arg := structs.CoordinateUpdateRequest{
|
||||
|
@ -455,33 +443,21 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
|
|||
Node: "foo",
|
||||
Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
|
||||
}
|
||||
if err := a.RPC("Coordinate.Update", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, a.RPC("Coordinate.Update", &arg, &out))
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
// Query again and now foo should have moved to the front of the line.
|
||||
req, _ = http.NewRequest("GET", "/v1/catalog/nodes?dc=dc1&near=foo", nil)
|
||||
resp = httptest.NewRecorder()
|
||||
obj, err = a.srv.CatalogNodes(resp, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
assertIndex(t, resp)
|
||||
nodes = obj.(structs.Nodes)
|
||||
if len(nodes) != 3 {
|
||||
t.Fatalf("bad: %v", obj)
|
||||
}
|
||||
if nodes[0].Node != "foo" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[1].Node != "bar" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[2].Node != a.Config.NodeName {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
require.Len(t, nodes, 3)
|
||||
require.Equal(t, "foo", nodes[0].Node)
|
||||
require.Equal(t, "bar", nodes[1].Node)
|
||||
require.Equal(t, a.Config.NodeName, nodes[2].Node)
|
||||
}
|
||||
|
||||
func TestCatalogServices(t *testing.T) {
|
||||
|
|
|
@ -2,6 +2,7 @@ package checks
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -246,7 +247,7 @@ func (c *CheckAlias) processChecks(checks []*structs.HealthCheck, CheckIfService
|
|||
msg := "No checks found."
|
||||
serviceFound := false
|
||||
for _, chk := range checks {
|
||||
if c.Node != "" && c.Node != chk.Node {
|
||||
if c.Node != "" && !strings.EqualFold(c.Node, chk.Node) {
|
||||
continue
|
||||
}
|
||||
serviceMatch := c.ServiceID.Matches(chk.CompoundServiceID())
|
||||
|
|
|
@ -454,6 +454,7 @@ func TestCheckAlias_remoteNodeOnlyPassing(t *testing.T) {
|
|||
func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, responseNodeName string) {
|
||||
notify := newMockAliasNotify()
|
||||
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
|
||||
rpc := &mockRPC{}
|
||||
|
@ -475,14 +476,14 @@ func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
|
|||
|
||||
// Should ignore any services
|
||||
{
|
||||
Node: "remote",
|
||||
Node: responseNodeName,
|
||||
ServiceID: "db",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
|
||||
// Match
|
||||
{
|
||||
Node: "remote",
|
||||
Node: responseNodeName,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
},
|
||||
|
@ -495,6 +496,14 @@ func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
|
|||
r.Fatalf("got state %q want %q", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("same case node name", func(t *testing.T) {
|
||||
run(t, "remote")
|
||||
})
|
||||
t.Run("lowercase node name", func(t *testing.T) {
|
||||
run(t, "ReMoTe")
|
||||
})
|
||||
}
|
||||
|
||||
type mockAliasNotify struct {
|
||||
|
|
|
@ -669,6 +669,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
connectEnabled := boolVal(c.Connect.Enabled)
|
||||
connectCAProvider := stringVal(c.Connect.CAProvider)
|
||||
connectCAConfig := c.Connect.CAConfig
|
||||
serverlessPluginEnabled := boolVal(c.Connect.EnableServerlessPlugin)
|
||||
|
||||
// autoEncrypt and autoConfig implicitly turns on connect which is why
|
||||
// they need to be above other settings that rely on connect.
|
||||
|
@ -979,6 +980,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
ConnectCAProvider: connectCAProvider,
|
||||
ConnectCAConfig: connectCAConfig,
|
||||
ConnectMeshGatewayWANFederationEnabled: connectMeshGatewayWANFederationEnabled,
|
||||
ConnectServerlessPluginEnabled: serverlessPluginEnabled,
|
||||
ConnectSidecarMinPort: sidecarMinPort,
|
||||
ConnectSidecarMaxPort: sidecarMaxPort,
|
||||
ConnectTestCALeafRootChangeSpread: b.durationVal("connect.test_ca_leaf_root_change_spread", c.Connect.TestCALeafRootChangeSpread),
|
||||
|
|
|
@ -612,6 +612,7 @@ type Connect struct {
|
|||
CAProvider *string `mapstructure:"ca_provider"`
|
||||
CAConfig map[string]interface{} `mapstructure:"ca_config"`
|
||||
MeshGatewayWANFederationEnabled *bool `mapstructure:"enable_mesh_gateway_wan_federation"`
|
||||
EnableServerlessPlugin *bool `mapstructure:"enable_serverless_plugin"`
|
||||
|
||||
// TestCALeafRootChangeSpread controls how long after a CA roots change before new leaft certs will be generated.
|
||||
// This is only tuned in tests, generally set to 1ns to make tests deterministic with when to expect updated leaf
|
||||
|
|
|
@ -486,6 +486,12 @@ type RuntimeConfig struct {
|
|||
// and servers in a cluster for correct connect operation.
|
||||
ConnectEnabled bool
|
||||
|
||||
// ConnectServerlessPluginEnabled opts the agent into the serverless plugin.
|
||||
// This plugin allows services to be configured as AWS Lambdas. After the
|
||||
// Lambda service is configured, Connect services can invoke the Lambda
|
||||
// service like any other upstream.
|
||||
ConnectServerlessPluginEnabled bool
|
||||
|
||||
// ConnectSidecarMinPort is the inclusive start of the range of ports
|
||||
// allocated to the agent for asigning to sidecar services where no port is
|
||||
// specified.
|
||||
|
|
|
@ -5538,6 +5538,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
"CSRMaxConcurrent": float64(2),
|
||||
},
|
||||
ConnectMeshGatewayWANFederationEnabled: false,
|
||||
ConnectServerlessPluginEnabled: true,
|
||||
DNSAddrs: []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")},
|
||||
DNSARecordLimit: 29907,
|
||||
DNSAllowStale: true,
|
||||
|
|
|
@ -127,6 +127,7 @@
|
|||
"ConnectCAProvider": "",
|
||||
"ConnectEnabled": false,
|
||||
"ConnectMeshGatewayWANFederationEnabled": false,
|
||||
"ConnectServerlessPluginEnabled": false,
|
||||
"ConnectSidecarMaxPort": 0,
|
||||
"ConnectSidecarMinPort": 0,
|
||||
"ConnectTestCALeafRootChangeSpread": "0s",
|
||||
|
|
|
@ -210,6 +210,7 @@ connect {
|
|||
}
|
||||
enable_mesh_gateway_wan_federation = false
|
||||
enabled = true
|
||||
enable_serverless_plugin = true
|
||||
}
|
||||
gossip_lan {
|
||||
gossip_nodes = 6
|
||||
|
|
|
@ -209,7 +209,8 @@
|
|||
"csr_max_concurrent": 2
|
||||
},
|
||||
"enable_mesh_gateway_wan_federation": false,
|
||||
"enabled": true
|
||||
"enabled": true,
|
||||
"enable_serverless_plugin": true
|
||||
},
|
||||
"gossip_lan" : {
|
||||
"gossip_nodes": 6,
|
||||
|
|
|
@ -32,3 +32,7 @@ func NewKindName(kind, name string, entMeta *structs.EnterpriseMeta) KindName {
|
|||
ret.Normalize()
|
||||
return ret
|
||||
}
|
||||
|
||||
func NewKindNameForEntry(entry structs.ConfigEntry) KindName {
|
||||
return NewKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
|
||||
}
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
package configentry
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// ResolvedServiceConfigSet is a wrapped set of raw cross-referenced config
|
||||
// entries necessary for the ConfigEntry.ResolveServiceConfig RPC process.
|
||||
//
|
||||
// None of these are defaulted.
|
||||
type ResolvedServiceConfigSet struct {
|
||||
ServiceDefaults map[structs.ServiceID]*structs.ServiceConfigEntry
|
||||
ProxyDefaults map[string]*structs.ProxyConfigEntry
|
||||
}
|
||||
|
||||
func (r *ResolvedServiceConfigSet) IsEmpty() bool {
|
||||
return len(r.ServiceDefaults) == 0 && len(r.ProxyDefaults) == 0
|
||||
}
|
||||
|
||||
func (r *ResolvedServiceConfigSet) GetServiceDefaults(sid structs.ServiceID) *structs.ServiceConfigEntry {
|
||||
if r.ServiceDefaults == nil {
|
||||
return nil
|
||||
}
|
||||
return r.ServiceDefaults[sid]
|
||||
}
|
||||
|
||||
func (r *ResolvedServiceConfigSet) GetProxyDefaults(partition string) *structs.ProxyConfigEntry {
|
||||
if r.ProxyDefaults == nil {
|
||||
return nil
|
||||
}
|
||||
return r.ProxyDefaults[partition]
|
||||
}
|
||||
|
||||
func (r *ResolvedServiceConfigSet) AddServiceDefaults(entry *structs.ServiceConfigEntry) {
|
||||
if entry == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ServiceDefaults == nil {
|
||||
r.ServiceDefaults = make(map[structs.ServiceID]*structs.ServiceConfigEntry)
|
||||
}
|
||||
|
||||
sid := structs.NewServiceID(entry.Name, &entry.EnterpriseMeta)
|
||||
r.ServiceDefaults[sid] = entry
|
||||
}
|
||||
|
||||
func (r *ResolvedServiceConfigSet) AddProxyDefaults(entry *structs.ProxyConfigEntry) {
|
||||
if entry == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ProxyDefaults == nil {
|
||||
r.ProxyDefaults = make(map[string]*structs.ProxyConfigEntry)
|
||||
}
|
||||
|
||||
r.ProxyDefaults[entry.PartitionOrDefault()] = entry
|
||||
}
|
|
@ -6,6 +6,7 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -86,6 +87,13 @@ func TestConsulProvider(t testing.T, d ConsulProviderStateDelegate) *ConsulProvi
|
|||
// These tests may be skipped in CI. They are run as part of a separate
|
||||
// integration test suite.
|
||||
func SkipIfVaultNotPresent(t testing.T) {
|
||||
// Try to safeguard against tests that will never run in CI.
|
||||
// This substring should match the pattern used by the
|
||||
// test-connect-ca-providers CI job.
|
||||
if !strings.Contains(t.Name(), "Vault") {
|
||||
t.Fatalf("test name must contain Vault, otherwise CI will never run it")
|
||||
}
|
||||
|
||||
vaultBinaryName := os.Getenv("VAULT_BINARY_NAME")
|
||||
if vaultBinaryName == "" {
|
||||
vaultBinaryName = "vault"
|
||||
|
|
|
@ -65,8 +65,8 @@ func (a *Agent) ConnectAuthorize(token string,
|
|||
return returnErr(err)
|
||||
}
|
||||
|
||||
if authz.ServiceWrite(req.Target, &authzContext) != acl.Allow {
|
||||
return returnErr(acl.ErrPermissionDenied)
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(req.Target, &authzContext); err != nil {
|
||||
return returnErr(err)
|
||||
}
|
||||
|
||||
if !uriService.MatchesPartition(req.TargetPartition()) {
|
||||
|
|
|
@ -286,8 +286,8 @@ func (a *ACL) TokenRead(args *structs.ACLTokenGetRequest, reply *structs.ACLToke
|
|||
// secrets will be redacted
|
||||
if authz, err = a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -354,8 +354,8 @@ func (a *ACL) TokenClone(args *structs.ACLTokenSetRequest, reply *structs.ACLTok
|
|||
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.ACLToken.EnterpriseMeta, &authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, token, err := a.srv.fsm.State().ACLTokenGetByAccessor(nil, args.ACLToken.AccessorID, &args.ACLToken.EnterpriseMeta)
|
||||
|
@ -425,8 +425,8 @@ func (a *ACL) TokenSet(args *structs.ACLTokenSetRequest, reply *structs.ACLToken
|
|||
var authzContext acl.AuthorizerContext
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.ACLToken.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.tokenSetInternal(args, reply, false)
|
||||
|
@ -830,8 +830,8 @@ func (a *ACL) TokenDelete(args *structs.ACLTokenDeleteRequest, reply *string) er
|
|||
var authzContext acl.AuthorizerContext
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := uuid.ParseUUID(args.TokenID); err != nil {
|
||||
|
@ -919,8 +919,8 @@ func (a *ACL) TokenList(args *structs.ACLTokenListRequest, reply *structs.ACLTok
|
|||
// merge the token default meta into the requests meta
|
||||
args.EnterpriseMeta.Merge(&requestMeta)
|
||||
args.EnterpriseMeta.FillAuthzContext(&authzContext)
|
||||
if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var methodMeta *structs.EnterpriseMeta
|
||||
|
@ -1026,8 +1026,8 @@ func (a *ACL) PolicyRead(args *structs.ACLPolicyGetRequest, reply *structs.ACLPo
|
|||
var authzContext acl.AuthorizerContext
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -1107,8 +1107,8 @@ func (a *ACL) PolicySet(args *structs.ACLPolicySetRequest, reply *structs.ACLPol
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.Policy.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
policy := &args.Policy
|
||||
|
@ -1237,8 +1237,8 @@ func (a *ACL) PolicyDelete(args *structs.ACLPolicyDeleteRequest, reply *string)
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, policy, err := a.srv.fsm.State().ACLPolicyGetByID(nil, args.PolicyID, &args.EnterpriseMeta)
|
||||
|
@ -1288,8 +1288,8 @@ func (a *ACL) PolicyList(args *structs.ACLPolicyListRequest, reply *structs.ACLP
|
|||
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -1412,8 +1412,8 @@ func (a *ACL) RoleRead(args *structs.ACLRoleGetRequest, reply *structs.ACLRoleRe
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -1493,8 +1493,8 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.Role.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
role := &args.Role
|
||||
|
@ -1651,8 +1651,8 @@ func (a *ACL) RoleDelete(args *structs.ACLRoleDeleteRequest, reply *string) erro
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, role, err := a.srv.fsm.State().ACLRoleGetByID(nil, args.RoleID, &args.EnterpriseMeta)
|
||||
|
@ -1698,8 +1698,8 @@ func (a *ACL) RoleList(args *structs.ACLRoleListRequest, reply *structs.ACLRoleL
|
|||
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -1797,8 +1797,8 @@ func (a *ACL) BindingRuleRead(args *structs.ACLBindingRuleGetRequest, reply *str
|
|||
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -1840,8 +1840,8 @@ func (a *ACL) BindingRuleSet(args *structs.ACLBindingRuleSetRequest, reply *stru
|
|||
// Verify token is permitted to modify ACLs
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.BindingRule.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var existing *structs.ACLBindingRule
|
||||
|
@ -1969,8 +1969,8 @@ func (a *ACL) BindingRuleDelete(args *structs.ACLBindingRuleDeleteRequest, reply
|
|||
// Verify token is permitted to modify ACLs
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, rule, err := a.srv.fsm.State().ACLBindingRuleGetByID(nil, args.BindingRuleID, &args.EnterpriseMeta)
|
||||
|
@ -2017,8 +2017,8 @@ func (a *ACL) BindingRuleList(args *structs.ACLBindingRuleListRequest, reply *st
|
|||
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -2056,8 +2056,8 @@ func (a *ACL) AuthMethodRead(args *structs.ACLAuthMethodGetRequest, reply *struc
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
@ -2101,8 +2101,8 @@ func (a *ACL) AuthMethodSet(args *structs.ACLAuthMethodSetRequest, reply *struct
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.AuthMethod.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
method := &args.AuthMethod
|
||||
|
@ -2213,8 +2213,8 @@ func (a *ACL) AuthMethodDelete(args *structs.ACLAuthMethodDeleteRequest, reply *
|
|||
|
||||
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
|
||||
return err
|
||||
} else if authz.ACLWrite(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, method, err := a.srv.fsm.State().ACLAuthMethodGetByName(nil, args.AuthMethodName, &args.EnterpriseMeta)
|
||||
|
@ -2267,8 +2267,8 @@ func (a *ACL) AuthMethodList(args *structs.ACLAuthMethodListRequest, reply *stru
|
|||
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if authz.ACLRead(&authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
|
||||
|
|
|
@ -3,6 +3,7 @@ package consul
|
|||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
|
@ -191,15 +192,15 @@ func servicePreApply(service *structs.NodeService, authz acl.Authorizer, authzCt
|
|||
// later if version 0.8 is enabled, so we can eventually just
|
||||
// delete this and do all the ACL checks down there.
|
||||
if service.Service != structs.ConsulServiceName {
|
||||
if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(service.Service, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Proxies must have write permission on their destination
|
||||
if service.Kind == structs.ServiceKindConnectProxy {
|
||||
if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(service.Proxy.DestinationServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -240,16 +241,18 @@ func vetRegisterWithACL(
|
|||
// privileges.
|
||||
needsNode := ns == nil || subj.ChangesNode(ns.Node)
|
||||
|
||||
if needsNode && authz.NodeWrite(subj.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if needsNode {
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(subj.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Vet the service change. This includes making sure they can register
|
||||
// the given service, and that we can write to any existing service that
|
||||
// is being modified by id (if any).
|
||||
if subj.Service != nil {
|
||||
if authz.ServiceWrite(subj.Service.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(subj.Service.Service, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ns != nil {
|
||||
|
@ -262,7 +265,7 @@ func vetRegisterWithACL(
|
|||
var secondaryCtx acl.AuthorizerContext
|
||||
other.FillAuthzContext(&secondaryCtx)
|
||||
|
||||
if authz.ServiceWrite(other.Service, &secondaryCtx) != acl.Allow {
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(other.Service, &secondaryCtx); err != nil {
|
||||
return acl.ErrPermissionDenied
|
||||
}
|
||||
}
|
||||
|
@ -285,15 +288,15 @@ func vetRegisterWithACL(
|
|||
// note in state_store.go to ban this down there in Consul 0.8,
|
||||
// but it's good to leave this here because it's required for
|
||||
// correctness wrt. ACLs.
|
||||
if check.Node != subj.Node {
|
||||
if !strings.EqualFold(check.Node, subj.Node) {
|
||||
return fmt.Errorf("Node '%s' for check '%s' doesn't match register request node '%s'",
|
||||
check.Node, check.CheckID, subj.Node)
|
||||
}
|
||||
|
||||
// Node-level check.
|
||||
if check.ServiceID == "" {
|
||||
if authz.NodeWrite(subj.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(subj.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -323,8 +326,8 @@ func vetRegisterWithACL(
|
|||
var secondaryCtx acl.AuthorizerContext
|
||||
other.FillAuthzContext(&secondaryCtx)
|
||||
|
||||
if authz.ServiceWrite(other.Service, &secondaryCtx) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(other.Service, &secondaryCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -407,7 +410,8 @@ func vetDeregisterWithACL(
|
|||
// Allow service deregistration if the token has write permission for the node.
|
||||
// This accounts for cases where the agent no longer has a token with write permission
|
||||
// on the service to deregister it.
|
||||
if authz.NodeWrite(subj.Node, &authzContext) == acl.Allow {
|
||||
nodeWriteErr := authz.ToAllowAuthorizer().NodeWriteAllowed(subj.Node, &authzContext)
|
||||
if nodeWriteErr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -422,8 +426,8 @@ func vetDeregisterWithACL(
|
|||
|
||||
ns.FillAuthzContext(&authzContext)
|
||||
|
||||
if authz.ServiceWrite(ns.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(ns.Service, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if subj.CheckID != "" {
|
||||
if nc == nil {
|
||||
|
@ -433,18 +437,18 @@ func vetDeregisterWithACL(
|
|||
nc.FillAuthzContext(&authzContext)
|
||||
|
||||
if nc.ServiceID != "" {
|
||||
if authz.ServiceWrite(nc.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(nc.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if authz.NodeWrite(subj.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(subj.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Since NodeWrite is not given - otherwise the earlier check
|
||||
// would've returned already - we can deny here.
|
||||
return acl.ErrPermissionDenied
|
||||
return nodeWriteErr
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -646,6 +650,8 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
|
|||
// If we're doing a connect query, we need read access to the service
|
||||
// we're trying to find proxies for, so check that.
|
||||
if args.Connect {
|
||||
// TODO(acl-error-enhancements) can this be improved? What happens if we returned an error here?
|
||||
// Is this similar to filters where we might want to return a hint?
|
||||
if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
|
||||
// Just return nil, which will return an empty response (tested)
|
||||
return nil
|
||||
|
@ -861,8 +867,8 @@ func (c *Catalog) GatewayServices(args *structs.ServiceSpecificRequest, reply *s
|
|||
return err
|
||||
}
|
||||
|
||||
if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.blockingQuery(
|
||||
|
@ -925,8 +931,8 @@ func (c *Catalog) VirtualIPForService(args *structs.ServiceSpecificRequest, repl
|
|||
return err
|
||||
}
|
||||
|
||||
if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := c.srv.fsm.State()
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/types"
|
||||
|
@ -3431,42 +3432,49 @@ service "gateway" {
|
|||
}
|
||||
|
||||
func TestVetRegisterWithACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
appendAuthz := func(t *testing.T, defaultAuthz acl.Authorizer, rules string) acl.Authorizer {
|
||||
policy, err := acl.NewPolicyFromSource(rules, acl.SyntaxCurrent, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
authz, err := acl.NewPolicyAuthorizerWithDefaults(defaultAuthz, []*acl.Policy{policy}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
return authz
|
||||
}
|
||||
|
||||
t.Run("With an 'allow all' authorizer the update should be allowed", func(t *testing.T) {
|
||||
args := &structs.RegisterRequest{
|
||||
Node: "nope",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
|
||||
// With an "allow all" authorizer the update should be allowed.
|
||||
if err := vetRegisterWithACL(acl.ManageAll(), args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
require.NoError(t, vetRegisterWithACL(acl.ManageAll(), args, nil))
|
||||
})
|
||||
|
||||
var perms acl.Authorizer = acl.DenyAll()
|
||||
|
||||
args := &structs.RegisterRequest{
|
||||
Node: "nope",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
|
||||
// Create a basic node policy.
|
||||
policy, err := acl.NewPolicyFromSource(`
|
||||
node "node" {
|
||||
perms = appendAuthz(t, perms, `
|
||||
node "node" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
} `)
|
||||
|
||||
// With that policy, the update should now be blocked for node reasons.
|
||||
err = vetRegisterWithACL(perms, args, nil)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
err := vetRegisterWithACL(perms, args, nil)
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Now use a permitted node name.
|
||||
args.Node = "node"
|
||||
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, nil))
|
||||
|
||||
// Build some node info that matches what we have now.
|
||||
ns := &structs.NodeServices{
|
||||
|
@ -3478,183 +3486,220 @@ node "node" {
|
|||
}
|
||||
|
||||
// Try to register a service, which should be blocked.
|
||||
args.Service = &structs.NodeService{
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Chain on a basic service policy.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
service "service" {
|
||||
perms = appendAuthz(t, perms, `
|
||||
service "service" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
} `)
|
||||
|
||||
// With the service ACL, the update should go through.
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Add an existing service that they are clobbering and aren't allowed
|
||||
// to write to.
|
||||
ns.Services["my-id"] = &structs.NodeService{
|
||||
ns = &structs.NodeServices{
|
||||
Node: &structs.Node{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
Services: map[string]*structs.NodeService{
|
||||
"my-id": {
|
||||
Service: "other",
|
||||
ID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Chain on a policy that allows them to write to the other service.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
service "other" {
|
||||
perms = appendAuthz(t, perms, `
|
||||
service "other" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
} `)
|
||||
|
||||
// Now it should go through.
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Try creating the node and the service at once by having no existing
|
||||
// node record. This should be ok since we have node and service
|
||||
// permissions.
|
||||
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, nil))
|
||||
|
||||
// Add a node-level check to the member, which should be rejected.
|
||||
args.Check = &structs.HealthCheck{
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "node",
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if err == nil || !strings.Contains(err.Error(), "check member must be nil") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
testutil.RequireErrorContains(t, err, "check member must be nil")
|
||||
|
||||
// Move the check into the slice, but give a bad node name.
|
||||
args.Check.Node = "nope"
|
||||
args.Checks = append(args.Checks, args.Check)
|
||||
args.Check = nil
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if err == nil || !strings.Contains(err.Error(), "doesn't match register request node") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "nope",
|
||||
},
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
testutil.RequireErrorContains(t, err, "doesn't match register request node")
|
||||
|
||||
// Fix the node name, which should now go through.
|
||||
args.Checks[0].Node = "node"
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Add a service-level check.
|
||||
args.Checks = append(args.Checks, &structs.HealthCheck{
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
})
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Try creating everything at once. This should be ok since we have all
|
||||
// the permissions we need. It also makes sure that we can register a
|
||||
// new node, service, and associated checks.
|
||||
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, nil))
|
||||
|
||||
// Nil out the service registration, which'll skip the special case
|
||||
// and force us to look at the ns data (it will look like we are
|
||||
// writing to the "other" service which also has "my-id").
|
||||
args.Service = nil
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Chain on a policy that forbids them to write to the other service.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
service "other" {
|
||||
perms = appendAuthz(t, perms, `
|
||||
service "other" {
|
||||
policy = "deny"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
} `)
|
||||
|
||||
// This should get rejected.
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Change the existing service data to point to a service name they
|
||||
// car write to. This should go through.
|
||||
ns.Services["my-id"] = &structs.NodeService{
|
||||
// can write to. This should go through.
|
||||
ns = &structs.NodeServices{
|
||||
Node: &structs.Node{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
Services: map[string]*structs.NodeService{
|
||||
"my-id": {
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Chain on a policy that forbids them to write to the node.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
node "node" {
|
||||
perms = appendAuthz(t, perms, `
|
||||
node "node" {
|
||||
policy = "deny"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
} `)
|
||||
|
||||
// This should get rejected because there's a node-level check in here.
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Change the node-level check into a service check, and then it should
|
||||
// go through.
|
||||
args.Checks[0].ServiceID = "my-id"
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Finally, attempt to update the node part of the data and make sure
|
||||
// that gets rejected since they no longer have permissions.
|
||||
args.Address = "127.0.0.2"
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.2",
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
}
|
||||
|
||||
func TestVetDeregisterWithACL(t *testing.T) {
|
||||
|
|
|
@ -10,8 +10,10 @@ import (
|
|||
"github.com/hashicorp/go-hclog"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/mitchellh/copystructure"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/configentry"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -87,8 +89,8 @@ func (c *ConfigEntry) Apply(args *structs.ConfigEntryRequest, reply *bool) error
|
|||
return err
|
||||
}
|
||||
|
||||
if !args.Entry.CanWrite(authz) {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := args.Entry.CanWrite(authz); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if args.Op != structs.ConfigEntryUpsert && args.Op != structs.ConfigEntryUpsertCAS {
|
||||
|
@ -192,8 +194,8 @@ func (c *ConfigEntry) Get(args *structs.ConfigEntryQuery, reply *structs.ConfigE
|
|||
}
|
||||
lookupEntry.GetEnterpriseMeta().Merge(&args.EnterpriseMeta)
|
||||
|
||||
if !lookupEntry.CanRead(authz) {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := lookupEntry.CanRead(authz); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.blockingQuery(
|
||||
|
@ -236,6 +238,10 @@ func (c *ConfigEntry) List(args *structs.ConfigEntryQuery, reply *structs.Indexe
|
|||
}
|
||||
}
|
||||
|
||||
var (
|
||||
priorHash uint64
|
||||
ranOnce bool
|
||||
)
|
||||
return c.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
|
@ -248,7 +254,8 @@ func (c *ConfigEntry) List(args *structs.ConfigEntryQuery, reply *structs.Indexe
|
|||
// Filter the entries returned by ACL permissions.
|
||||
filteredEntries := make([]structs.ConfigEntry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.CanRead(authz) {
|
||||
if err := entry.CanRead(authz); err != nil {
|
||||
// TODO we may wish to extract more details from this error to aid user comprehension
|
||||
reply.QueryMeta.ResultsFilteredByACLs = true
|
||||
continue
|
||||
}
|
||||
|
@ -258,6 +265,26 @@ func (c *ConfigEntry) List(args *structs.ConfigEntryQuery, reply *structs.Indexe
|
|||
reply.Kind = args.Kind
|
||||
reply.Index = index
|
||||
reply.Entries = filteredEntries
|
||||
|
||||
// Generate a hash of the content driving this response. Use it to
|
||||
// determine if the response is identical to a prior wakeup.
|
||||
newHash, err := hashstructure_v2.Hash(filteredEntries, hashstructure_v2.FormatV2, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
|
||||
}
|
||||
|
||||
if ranOnce && priorHash == newHash {
|
||||
priorHash = newHash
|
||||
return errNotChanged
|
||||
} else {
|
||||
priorHash = newHash
|
||||
ranOnce = true
|
||||
}
|
||||
|
||||
if len(reply.Entries) == 0 {
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -309,7 +336,8 @@ func (c *ConfigEntry) ListAll(args *structs.ConfigEntryListAllRequest, reply *st
|
|||
// Filter the entries returned by ACL permissions or by the provided kinds.
|
||||
filteredEntries := make([]structs.ConfigEntry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.CanRead(authz) {
|
||||
if err := entry.CanRead(authz); err != nil {
|
||||
// TODO we may wish to extract more details from this error to aid user comprehension
|
||||
reply.QueryMeta.ResultsFilteredByACLs = true
|
||||
continue
|
||||
}
|
||||
|
@ -360,8 +388,8 @@ func (c *ConfigEntry) Delete(args *structs.ConfigEntryRequest, reply *structs.Co
|
|||
return err
|
||||
}
|
||||
|
||||
if !args.Entry.CanWrite(authz) {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := args.Entry.CanWrite(authz); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Only delete and delete-cas ops are supported. If the caller erroneously
|
||||
|
@ -413,40 +441,112 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.ServiceRead(args.Name, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
}
|
||||
|
||||
return c.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
var thisReply structs.ServiceConfigResponse
|
||||
|
||||
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
||||
// TODO(freddy) Refactor this into smaller set of state store functions
|
||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
|
||||
// blocking query, this function will be rerun and these state store lookups will both be current.
|
||||
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
|
||||
_, proxyEntry, err := state.ConfigEntry(ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, &args.EnterpriseMeta)
|
||||
if err != nil {
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.Name, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
proxyConf *structs.ProxyConfigEntry
|
||||
proxyConfGlobalProtocol string
|
||||
ok bool
|
||||
priorHash uint64
|
||||
ranOnce bool
|
||||
)
|
||||
if proxyEntry != nil {
|
||||
proxyConf, ok = proxyEntry.(*structs.ProxyConfigEntry)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid proxy config type %T", proxyEntry)
|
||||
return c.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
var (
|
||||
upstreamIDs = args.UpstreamIDs
|
||||
legacyUpstreams = false
|
||||
)
|
||||
|
||||
// The request is considered legacy if the deprecated args.Upstream was used
|
||||
if len(upstreamIDs) == 0 && len(args.Upstreams) > 0 {
|
||||
legacyUpstreams = true
|
||||
|
||||
upstreamIDs = make([]structs.ServiceID, 0)
|
||||
for _, upstream := range args.Upstreams {
|
||||
// Before Consul namespaces were released, the Upstreams
|
||||
// provided to the endpoint did not contain the namespace.
|
||||
// Because of this we attach the enterprise meta of the
|
||||
// request, which will just be the default namespace.
|
||||
sid := structs.NewServiceID(upstream, &args.EnterpriseMeta)
|
||||
upstreamIDs = append(upstreamIDs, sid)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch all relevant config entries.
|
||||
|
||||
index, entries, err := state.ReadResolvedServiceConfigEntries(
|
||||
ws,
|
||||
args.Name,
|
||||
&args.EnterpriseMeta,
|
||||
upstreamIDs,
|
||||
args.Mode,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate a hash of the config entry content driving this
|
||||
// response. Use it to determine if the response is identical to a
|
||||
// prior wakeup.
|
||||
newHash, err := hashstructure_v2.Hash(entries, hashstructure_v2.FormatV2, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
|
||||
}
|
||||
|
||||
if ranOnce && priorHash == newHash {
|
||||
priorHash = newHash
|
||||
reply.Index = index
|
||||
// NOTE: the prior response is still alive inside of *reply, which
|
||||
// is desirable
|
||||
return errNotChanged
|
||||
} else {
|
||||
priorHash = newHash
|
||||
ranOnce = true
|
||||
}
|
||||
|
||||
thisReply, err := c.computeResolvedServiceConfig(
|
||||
args,
|
||||
upstreamIDs,
|
||||
legacyUpstreams,
|
||||
entries,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
thisReply.Index = index
|
||||
|
||||
*reply = *thisReply
|
||||
if entries.IsEmpty() {
|
||||
// No config entries factored into this reply; it's a default.
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConfigEntry) computeResolvedServiceConfig(
|
||||
args *structs.ServiceConfigRequest,
|
||||
upstreamIDs []structs.ServiceID,
|
||||
legacyUpstreams bool,
|
||||
entries *configentry.ResolvedServiceConfigSet,
|
||||
) (*structs.ServiceConfigResponse, error) {
|
||||
var thisReply structs.ServiceConfigResponse
|
||||
|
||||
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
||||
|
||||
// TODO(freddy) Refactor this into smaller set of state store functions
|
||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
|
||||
// blocking query, this function will be rerun and these state store lookups will both be current.
|
||||
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
|
||||
var proxyConfGlobalProtocol string
|
||||
proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
|
||||
if proxyConf != nil {
|
||||
// Apply the proxy defaults to the sidecar's proxy config
|
||||
mapCopy, err := copystructure.Copy(proxyConf.Config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy global proxy-defaults: %v", err)
|
||||
return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
|
||||
}
|
||||
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
|
||||
thisReply.Mode = proxyConf.Mode
|
||||
|
@ -457,25 +557,18 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
// Extract the global protocol from proxyConf for upstream configs.
|
||||
rawProtocol := proxyConf.Config["protocol"]
|
||||
if rawProtocol != nil {
|
||||
var ok bool
|
||||
proxyConfGlobalProtocol, ok = rawProtocol.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid protocol type %T", rawProtocol)
|
||||
return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
index, serviceEntry, err := state.ConfigEntry(ws, structs.ServiceDefaults, args.Name, &args.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
thisReply.Index = index
|
||||
|
||||
var serviceConf *structs.ServiceConfigEntry
|
||||
if serviceEntry != nil {
|
||||
serviceConf, ok = serviceEntry.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid service config type %T", serviceEntry)
|
||||
}
|
||||
serviceConf := entries.GetServiceDefaults(
|
||||
structs.NewServiceID(args.Name, &args.EnterpriseMeta),
|
||||
)
|
||||
if serviceConf != nil {
|
||||
if serviceConf.Expose.Checks {
|
||||
thisReply.Expose.Checks = true
|
||||
}
|
||||
|
@ -500,6 +593,8 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
if serviceConf.Mode != structs.ProxyModeDefault {
|
||||
thisReply.Mode = serviceConf.Mode
|
||||
}
|
||||
|
||||
thisReply.Meta = serviceConf.Meta
|
||||
}
|
||||
|
||||
// First collect all upstreams into a set of seen upstreams.
|
||||
|
@ -508,9 +603,6 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
// - Implicitly from centralized upstream config in service-defaults
|
||||
seenUpstreams := map[structs.ServiceID]struct{}{}
|
||||
|
||||
upstreamIDs := args.UpstreamIDs
|
||||
legacyUpstreams := false
|
||||
|
||||
var (
|
||||
noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0
|
||||
|
||||
|
@ -520,24 +612,10 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
)
|
||||
|
||||
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
|
||||
// If no upstreams were passed, then we should only returned the resolved config if the proxy in transparent mode.
|
||||
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
|
||||
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
|
||||
if noUpstreamArgs && !tproxy {
|
||||
*reply = thisReply
|
||||
return nil
|
||||
}
|
||||
|
||||
// The request is considered legacy if the deprecated args.Upstream was used
|
||||
if len(upstreamIDs) == 0 && len(args.Upstreams) > 0 {
|
||||
legacyUpstreams = true
|
||||
|
||||
upstreamIDs = make([]structs.ServiceID, 0)
|
||||
for _, upstream := range args.Upstreams {
|
||||
// Before Consul namespaces were released, the Upstreams provided to the endpoint did not contain the namespace.
|
||||
// Because of this we attach the enterprise meta of the request, which will just be the default namespace.
|
||||
sid := structs.NewServiceID(upstream, &args.EnterpriseMeta)
|
||||
upstreamIDs = append(upstreamIDs, sid)
|
||||
}
|
||||
return &thisReply, nil
|
||||
}
|
||||
|
||||
// First store all upstreams that were provided in the request
|
||||
|
@ -592,19 +670,15 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
// (how the downstream wants to address it)
|
||||
protocol := proxyConfGlobalProtocol
|
||||
|
||||
_, upstreamSvcDefaults, err := state.ConfigEntry(ws, structs.ServiceDefaults, upstream.ID, &upstream.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
upstreamSvcDefaults := entries.GetServiceDefaults(
|
||||
structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
|
||||
)
|
||||
if upstreamSvcDefaults != nil {
|
||||
cfg, ok := upstreamSvcDefaults.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid service config type %T", upstreamSvcDefaults)
|
||||
}
|
||||
if cfg.Protocol != "" {
|
||||
protocol = cfg.Protocol
|
||||
if upstreamSvcDefaults.Protocol != "" {
|
||||
protocol = upstreamSvcDefaults.Protocol
|
||||
}
|
||||
}
|
||||
|
||||
if protocol != "" {
|
||||
resolvedCfg["protocol"] = protocol
|
||||
}
|
||||
|
@ -637,8 +711,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
|
||||
// don't allocate the slices just to not fill them
|
||||
if len(usConfigs) == 0 {
|
||||
*reply = thisReply
|
||||
return nil
|
||||
return &thisReply, nil
|
||||
}
|
||||
|
||||
if legacyUpstreams {
|
||||
|
@ -658,9 +731,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
}
|
||||
}
|
||||
|
||||
*reply = thisReply
|
||||
return nil
|
||||
})
|
||||
return &thisReply, nil
|
||||
}
|
||||
|
||||
func gateWriteToSecondary(targetDC, localDC, primaryDC, kind string) error {
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
@ -9,10 +8,11 @@ import (
|
|||
"time"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/configentry"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
|
@ -309,64 +309,54 @@ func TestConfigEntry_Get_BlockOnNonExistent(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
_, s1 := testServerWithConfig(t)
|
||||
codec := rpcClient(t, s1)
|
||||
store := s1.fsm.State()
|
||||
t.Parallel()
|
||||
|
||||
entry := &structs.ServiceConfigEntry{
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.DevMode = true // keep it in ram to make it 10x faster on macos
|
||||
})
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
{ // create one relevant entry
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "alpha",
|
||||
},
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
require.NoError(t, store.EnsureConfigEntry(1, entry))
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var count int
|
||||
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
g.Go(func() error {
|
||||
runStep(t, "test the errNotFound path", func(t *testing.T) {
|
||||
rpcBlockingQueryTestHarness(t,
|
||||
func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error) {
|
||||
args := structs.ConfigEntryQuery{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "does-not-exist",
|
||||
}
|
||||
args.QueryOptions.MaxQueryTime = time.Second
|
||||
args.QueryOptions.MinQueryIndex = minQueryIndex
|
||||
|
||||
for ctx.Err() == nil {
|
||||
var out structs.ConfigEntryResponse
|
||||
|
||||
err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.Get", &args, &out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.Log("blocking query index", out.QueryMeta.Index, out.Entry)
|
||||
count++
|
||||
args.QueryOptions.MinQueryIndex = out.QueryMeta.Index
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
g.Go(func() error {
|
||||
for i := uint64(0); i < 200; i++ {
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
entry := &structs.ServiceConfigEntry{
|
||||
errCh := channelCallRPC(s1, "ConfigEntry.Get", &args, &out, nil)
|
||||
return &out.QueryMeta, errCh
|
||||
},
|
||||
func(i int) <-chan error {
|
||||
var out bool
|
||||
return channelCallRPC(s1, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: fmt.Sprintf("other%d", i),
|
||||
},
|
||||
}, &out, func() error {
|
||||
if !out {
|
||||
return fmt.Errorf("[%d] unexpectedly returned false", i)
|
||||
}
|
||||
if err := store.EnsureConfigEntry(i+2, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, g.Wait())
|
||||
// The test is a bit racy because of the timing of the two goroutines, so
|
||||
// we relax the check for the count to be within a small range.
|
||||
if count < 2 || count > 3 {
|
||||
t.Fatalf("expected count to be 2 or 3, got %d", count)
|
||||
}
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigEntry_Get_ACLDeny(t *testing.T) {
|
||||
|
@ -472,6 +462,79 @@ func TestConfigEntry_List(t *testing.T) {
|
|||
require.Equal(t, expected, out)
|
||||
}
|
||||
|
||||
func TestConfigEntry_List_BlockOnNoChange(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.DevMode = true // keep it in ram to make it 10x faster on macos
|
||||
})
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
run := func(t *testing.T, dataPrefix string) {
|
||||
rpcBlockingQueryTestHarness(t,
|
||||
func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error) {
|
||||
args := structs.ConfigEntryQuery{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
args.QueryOptions.MinQueryIndex = minQueryIndex
|
||||
|
||||
var out structs.IndexedConfigEntries
|
||||
|
||||
errCh := channelCallRPC(s1, "ConfigEntry.List", &args, &out, nil)
|
||||
return &out.QueryMeta, errCh
|
||||
},
|
||||
func(i int) <-chan error {
|
||||
var out bool
|
||||
return channelCallRPC(s1, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: fmt.Sprintf(dataPrefix+"%d", i),
|
||||
ConnectTimeout: 33 * time.Second,
|
||||
},
|
||||
}, &out, func() error {
|
||||
if !out {
|
||||
return fmt.Errorf("[%d] unexpectedly returned false", i)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotFound path", func(t *testing.T) {
|
||||
run(t, "other")
|
||||
})
|
||||
|
||||
{ // Create some dummy services in the state store to look up.
|
||||
for _, entry := range []structs.ConfigEntry{
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "bar",
|
||||
},
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "foo",
|
||||
},
|
||||
} {
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: entry,
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotChanged path", func(t *testing.T) {
|
||||
run(t, "completely-different-other")
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigEntry_ListAll(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -970,6 +1033,7 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
|
|||
Kind: structs.ServiceDefaults,
|
||||
Name: "foo",
|
||||
Protocol: "http",
|
||||
Meta: map[string]string{"foo": "bar"},
|
||||
}))
|
||||
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
|
@ -995,6 +1059,7 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
|
|||
"protocol": "grpc",
|
||||
},
|
||||
},
|
||||
Meta: map[string]string{"foo": "bar"},
|
||||
// Don't know what this is deterministically
|
||||
QueryMeta: out.QueryMeta,
|
||||
}
|
||||
|
@ -2025,6 +2090,119 @@ func TestConfigEntry_ResolveServiceConfig_ProxyDefaultsProtocol_UsedForAllUpstre
|
|||
require.Equal(t, expected, out)
|
||||
}
|
||||
|
||||
func BenchmarkConfigEntry_ResolveServiceConfig_Hash(b *testing.B) {
|
||||
res := &configentry.ResolvedServiceConfigSet{}
|
||||
|
||||
res.AddServiceDefaults(&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "web",
|
||||
Protocol: "http",
|
||||
})
|
||||
res.AddServiceDefaults(&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "up1",
|
||||
Protocol: "http",
|
||||
})
|
||||
res.AddServiceDefaults(&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "up2",
|
||||
Protocol: "http",
|
||||
})
|
||||
res.AddProxyDefaults(&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
})
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := hashstructure_v2.Hash(res, hashstructure_v2.FormatV2, nil)
|
||||
if err != nil {
|
||||
b.Fatalf("error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfig_BlockOnNoChange(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.DevMode = true // keep it in ram to make it 10x faster on macos
|
||||
})
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
run := func(t *testing.T, dataPrefix string) {
|
||||
rpcBlockingQueryTestHarness(t,
|
||||
func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error) {
|
||||
args := structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
structs.NewServiceID("bar", nil),
|
||||
},
|
||||
}
|
||||
args.QueryOptions.MinQueryIndex = minQueryIndex
|
||||
|
||||
var out structs.ServiceConfigResponse
|
||||
|
||||
errCh := channelCallRPC(s1, "ConfigEntry.ResolveServiceConfig", &args, &out, nil)
|
||||
return &out.QueryMeta, errCh
|
||||
},
|
||||
func(i int) <-chan error {
|
||||
var out bool
|
||||
return channelCallRPC(s1, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: fmt.Sprintf(dataPrefix+"%d", i),
|
||||
},
|
||||
}, &out, func() error {
|
||||
if !out {
|
||||
return fmt.Errorf("[%d] unexpectedly returned false", i)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
{ // create one unrelated entry
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "unrelated",
|
||||
},
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotFound path", func(t *testing.T) {
|
||||
run(t, "other")
|
||||
})
|
||||
|
||||
{ // create one relevant entry
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "bar",
|
||||
Protocol: "grpc",
|
||||
},
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotChanged path", func(t *testing.T) {
|
||||
run(t, "completely-different-other")
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfigNoConfig(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -91,6 +92,7 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con
|
|||
ticker := time.NewTicker(time.Second / time.Duration(s.config.ConfigReplicationApplyLimit))
|
||||
defer ticker.Stop()
|
||||
|
||||
var merr error
|
||||
for i, entry := range configs {
|
||||
// Exported services only apply to the primary datacenter.
|
||||
if entry.GetKind() == structs.ExportedServices {
|
||||
|
@ -104,7 +106,7 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con
|
|||
|
||||
_, err := s.raftApply(structs.ConfigEntryRequestType, &req)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to apply config %s: %v", op, err)
|
||||
merr = multierror.Append(merr, fmt.Errorf("Failed to apply config entry %s: %w", op, err))
|
||||
}
|
||||
|
||||
if i < len(configs)-1 {
|
||||
|
@ -117,7 +119,7 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con
|
|||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
return false, merr
|
||||
}
|
||||
|
||||
func (s *Server) fetchConfigEntries(lastRemoteIndex uint64) (*structs.IndexedGenericConfigEntries, error) {
|
||||
|
@ -204,6 +206,7 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
|
|||
"updates", len(updates),
|
||||
)
|
||||
|
||||
var merr error
|
||||
if len(deletions) > 0 {
|
||||
logger.Debug("Deleting local config entries",
|
||||
"deletions", len(deletions),
|
||||
|
@ -214,10 +217,11 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
|
|||
return 0, true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("failed to delete local config entries: %v", err)
|
||||
}
|
||||
merr = multierror.Append(merr, err)
|
||||
} else {
|
||||
logger.Debug("Config Entry replication - finished deletions")
|
||||
}
|
||||
}
|
||||
|
||||
if len(updates) > 0 {
|
||||
logger.Debug("Updating local config entries",
|
||||
|
@ -228,10 +232,15 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
|
|||
return 0, true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("failed to update local config entries: %v", err)
|
||||
}
|
||||
merr = multierror.Append(merr, err)
|
||||
} else {
|
||||
logger.Debug("Config Entry replication - finished updates")
|
||||
}
|
||||
}
|
||||
|
||||
if merr != nil {
|
||||
return 0, false, merr
|
||||
}
|
||||
|
||||
// Return the index we got back from the remote side, since we've synced
|
||||
// up with the remote state as of that index.
|
||||
|
|
|
@ -247,3 +247,100 @@ func TestReplication_ConfigEntries(t *testing.T) {
|
|||
checkSame(r)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReplication_ConfigEntries_GraphValidationErrorDuringReplication(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
})
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
_, s2 := testServerWithConfig(t, func(c *Config) {
|
||||
c.Datacenter = "dc2"
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.ConfigReplicationRate = 100
|
||||
c.ConfigReplicationBurst = 100
|
||||
c.ConfigReplicationApplyLimit = 1000000
|
||||
})
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Create two entries that will replicate in the wrong order and not work.
|
||||
entries := []structs.ConfigEntry{
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "foo",
|
||||
Protocol: "http",
|
||||
},
|
||||
&structs.IngressGatewayConfigEntry{
|
||||
Kind: structs.IngressGateway,
|
||||
Name: "foo",
|
||||
Listeners: []structs.IngressListener{
|
||||
{
|
||||
Port: 9191,
|
||||
Protocol: "http",
|
||||
Services: []structs.IngressService{
|
||||
{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, entry := range entries {
|
||||
arg := structs.ConfigEntryRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.ConfigEntryUpsert,
|
||||
Entry: entry,
|
||||
}
|
||||
|
||||
out := false
|
||||
require.NoError(t, s1.RPC("ConfigEntry.Apply", &arg, &out))
|
||||
}
|
||||
|
||||
// Try to join which should kick off replication.
|
||||
joinWAN(t, s2, s1)
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc2")
|
||||
|
||||
checkSame := func(t require.TestingT) error {
|
||||
_, remote, err := s1.fsm.State().ConfigEntries(nil, structs.ReplicationEnterpriseMeta())
|
||||
require.NoError(t, err)
|
||||
_, local, err := s2.fsm.State().ConfigEntries(nil, structs.ReplicationEnterpriseMeta())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, local, len(remote))
|
||||
for i, entry := range remote {
|
||||
require.Equal(t, entry.GetKind(), local[i].GetKind())
|
||||
require.Equal(t, entry.GetName(), local[i].GetName())
|
||||
|
||||
// more validations
|
||||
switch entry.GetKind() {
|
||||
case structs.IngressGateway:
|
||||
localGw, ok := local[i].(*structs.IngressGatewayConfigEntry)
|
||||
require.True(t, ok)
|
||||
remoteGw, ok := entry.(*structs.IngressGatewayConfigEntry)
|
||||
require.True(t, ok)
|
||||
require.Len(t, remoteGw.Listeners, 1)
|
||||
require.Len(t, localGw.Listeners, 1)
|
||||
require.Equal(t, remoteGw.Listeners[0].Protocol, localGw.Listeners[0].Protocol)
|
||||
case structs.ServiceDefaults:
|
||||
localSvc, ok := local[i].(*structs.ServiceConfigEntry)
|
||||
require.True(t, ok)
|
||||
remoteSvc, ok := entry.(*structs.ServiceConfigEntry)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, remoteSvc.Protocol, localSvc.Protocol)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait for the replica to converge.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
checkSame(r)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -65,8 +65,8 @@ func (s *ConnectCA) ConfigurationGet(
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := s.srv.fsm.State()
|
||||
|
@ -97,8 +97,8 @@ func (s *ConnectCA) ConfigurationSet(
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.srv.caManager.UpdateConfiguration(args)
|
||||
|
@ -175,8 +175,8 @@ func (s *ConnectCA) Sign(
|
|||
if isService {
|
||||
entMeta.Merge(serviceID.GetEnterpriseMeta())
|
||||
entMeta.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(serviceID.Service, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(serviceID.Service, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify that the DC in the service URI matches us. We might relax this
|
||||
|
@ -187,8 +187,8 @@ func (s *ConnectCA) Sign(
|
|||
}
|
||||
} else if isAgent {
|
||||
agentID.GetEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.NodeWrite(agentID.Agent, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(agentID.Agent, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,8 +223,8 @@ func (s *ConnectCA) SignIntermediate(
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
provider, _ := s.srv.caManager.getCAProvider()
|
||||
|
|
|
@ -152,8 +152,8 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct
|
|||
return err
|
||||
}
|
||||
|
||||
if authz.NodeWrite(args.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(args.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the coordinate to the map of pending updates.
|
||||
|
@ -245,8 +245,8 @@ func (c *Coordinate) Node(args *structs.NodeSpecificRequest, reply *structs.Inde
|
|||
return err
|
||||
}
|
||||
|
||||
if authz.NodeRead(args.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeReadAllowed(args.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.blockingQuery(&args.QueryOptions,
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/discoverychain"
|
||||
|
@ -35,8 +36,8 @@ func (c *DiscoveryChain) Get(args *structs.DiscoveryChainRequest, reply *structs
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.ServiceRead(args.Name, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.Name, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if args.Name == "" {
|
||||
|
@ -48,6 +49,10 @@ func (c *DiscoveryChain) Get(args *structs.DiscoveryChainRequest, reply *structs
|
|||
evalDC = c.srv.config.Datacenter
|
||||
}
|
||||
|
||||
var (
|
||||
priorHash uint64
|
||||
ranOnce bool
|
||||
)
|
||||
return c.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
|
@ -61,14 +66,37 @@ func (c *DiscoveryChain) Get(args *structs.DiscoveryChainRequest, reply *structs
|
|||
OverrideProtocol: args.OverrideProtocol,
|
||||
OverrideConnectTimeout: args.OverrideConnectTimeout,
|
||||
}
|
||||
index, chain, err := state.ServiceDiscoveryChain(ws, args.Name, entMeta, req)
|
||||
index, chain, entries, err := state.ServiceDiscoveryChain(ws, args.Name, entMeta, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate a hash of the config entry content driving this
|
||||
// response. Use it to determine if the response is identical to a
|
||||
// prior wakeup.
|
||||
newHash, err := hashstructure_v2.Hash(chain, hashstructure_v2.FormatV2, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
|
||||
}
|
||||
|
||||
if ranOnce && priorHash == newHash {
|
||||
priorHash = newHash
|
||||
reply.Index = index
|
||||
// NOTE: the prior response is still alive inside of *reply, which
|
||||
// is desirable
|
||||
return errNotChanged
|
||||
} else {
|
||||
priorHash = newHash
|
||||
ranOnce = true
|
||||
}
|
||||
|
||||
reply.Index = index
|
||||
reply.Chain = chain
|
||||
|
||||
if entries.IsEmpty() {
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -242,3 +242,88 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
|
|||
require.Equal(t, expect, resp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiscoveryChainEndpoint_Get_BlockOnNoChange(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.DevMode = true // keep it in ram to make it 10x faster on macos
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
})
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
waitForLeaderEstablishment(t, s1)
|
||||
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
|
||||
|
||||
{ // create one unrelated entry
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Datacenter: "dc1",
|
||||
Entry: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "unrelated",
|
||||
ConnectTimeout: 33 * time.Second,
|
||||
},
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
|
||||
run := func(t *testing.T, dataPrefix string) {
|
||||
rpcBlockingQueryTestHarness(t,
|
||||
func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error) {
|
||||
args := &structs.DiscoveryChainRequest{
|
||||
Name: "web",
|
||||
EvaluateInDatacenter: "dc1",
|
||||
EvaluateInNamespace: "default",
|
||||
EvaluateInPartition: "default",
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
args.QueryOptions.MinQueryIndex = minQueryIndex
|
||||
|
||||
var out structs.DiscoveryChainResponse
|
||||
errCh := channelCallRPC(s1, "DiscoveryChain.Get", &args, &out, func() error {
|
||||
if !out.Chain.IsDefault() {
|
||||
return fmt.Errorf("expected default chain")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return &out.QueryMeta, errCh
|
||||
},
|
||||
func(i int) <-chan error {
|
||||
var out bool
|
||||
return channelCallRPC(s1, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Datacenter: "dc1",
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: fmt.Sprintf(dataPrefix+"%d", i),
|
||||
},
|
||||
}, &out, nil)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotFound path", func(t *testing.T) {
|
||||
run(t, "other")
|
||||
})
|
||||
|
||||
{ // create one relevant entry
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "web",
|
||||
Protocol: "grpc",
|
||||
},
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotChanged path", func(t *testing.T) {
|
||||
run(t, "completely-different-other")
|
||||
})
|
||||
}
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"github.com/armon/go-metrics/prometheus"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -63,8 +62,8 @@ func (c *FederationState) Apply(args *structs.FederationStateRequest, reply *boo
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if args.State == nil || args.State.Datacenter == "" {
|
||||
|
@ -109,8 +108,8 @@ func (c *FederationState) Get(args *structs.FederationStateQuery, reply *structs
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.blockingQuery(
|
||||
|
@ -148,8 +147,8 @@ func (c *FederationState) List(args *structs.DCSpecificRequest, reply *structs.I
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.blockingQuery(
|
||||
|
|
|
@ -220,6 +220,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
|
|||
// If we're doing a connect or ingress query, we need read access to the service
|
||||
// we're trying to find proxies for, so check that.
|
||||
if args.Connect || args.Ingress {
|
||||
// TODO(acl-error-enhancements) Look for ways to percolate this information up to give any feedback to the user.
|
||||
if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
|
||||
// Just return nil, which will return an empty response (tested)
|
||||
return nil
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/hashicorp/go-bexpr"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
|
@ -609,14 +610,20 @@ func (s *Intention) Match(args *structs.IntentionQueryRequest, reply *structs.In
|
|||
// matching, if you have it on the dest then perform a dest type match.
|
||||
for _, entry := range args.Match.Entries {
|
||||
entry.FillAuthzContext(&authzContext)
|
||||
if prefix := entry.Name; prefix != "" && authz.IntentionRead(prefix, &authzContext) != acl.Allow {
|
||||
if prefix := entry.Name; prefix != "" {
|
||||
if err := authz.ToAllowAuthorizer().IntentionReadAllowed(prefix, &authzContext); err != nil {
|
||||
accessorID := authz.AccessorID()
|
||||
// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
|
||||
s.logger.Warn("Operation on intention prefix denied due to ACLs", "prefix", prefix, "accessorID", accessorID)
|
||||
return acl.ErrPermissionDenied
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
priorHash uint64
|
||||
ranOnce bool
|
||||
)
|
||||
return s.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
|
@ -628,6 +635,35 @@ func (s *Intention) Match(args *structs.IntentionQueryRequest, reply *structs.In
|
|||
|
||||
reply.Index = index
|
||||
reply.Matches = matches
|
||||
|
||||
// Generate a hash of the intentions content driving this response.
|
||||
// Use it to determine if the response is identical to a prior
|
||||
// wakeup.
|
||||
newHash, err := hashstructure_v2.Hash(matches, hashstructure_v2.FormatV2, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
|
||||
}
|
||||
|
||||
if ranOnce && priorHash == newHash {
|
||||
priorHash = newHash
|
||||
return errNotChanged
|
||||
} else {
|
||||
priorHash = newHash
|
||||
ranOnce = true
|
||||
}
|
||||
|
||||
hasData := false
|
||||
for _, match := range matches {
|
||||
if len(match) > 0 {
|
||||
hasData = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !hasData {
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
)
|
||||
|
@ -699,11 +735,11 @@ func (s *Intention) Check(args *structs.IntentionQueryRequest, reply *structs.In
|
|||
if prefix, ok := query.GetACLPrefix(); ok {
|
||||
var authzContext acl.AuthorizerContext
|
||||
query.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceRead(prefix, &authzContext) != acl.Allow {
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(prefix, &authzContext); err != nil {
|
||||
accessorID := authz.AccessorID()
|
||||
// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
|
||||
s.logger.Warn("test on intention denied due to ACLs", "prefix", prefix, "accessorID", accessorID)
|
||||
return acl.ErrPermissionDenied
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1742,6 +1742,98 @@ func TestIntentionMatch_good(t *testing.T) {
|
|||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestIntentionMatch_BlockOnNoChange(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.DevMode = true // keep it in ram to make it 10x faster on macos
|
||||
})
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
waitForLeaderEstablishment(t, s1)
|
||||
|
||||
run := func(t *testing.T, dataPrefix string, expectMatches int) {
|
||||
rpcBlockingQueryTestHarness(t,
|
||||
func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error) {
|
||||
args := &structs.IntentionQueryRequest{
|
||||
Datacenter: "dc1",
|
||||
Match: &structs.IntentionQueryMatch{
|
||||
Type: structs.IntentionMatchDestination,
|
||||
Entries: []structs.IntentionMatchEntry{
|
||||
{Name: "bar"},
|
||||
},
|
||||
},
|
||||
}
|
||||
args.QueryOptions.MinQueryIndex = minQueryIndex
|
||||
|
||||
var out structs.IndexedIntentionMatches
|
||||
errCh := channelCallRPC(s1, "Intention.Match", args, &out, func() error {
|
||||
if len(out.Matches) != 1 {
|
||||
return fmt.Errorf("expected 1 match got %d", len(out.Matches))
|
||||
}
|
||||
if len(out.Matches[0]) != expectMatches {
|
||||
return fmt.Errorf("expected %d inner matches got %d", expectMatches, len(out.Matches[0]))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return &out.QueryMeta, errCh
|
||||
},
|
||||
func(i int) <-chan error {
|
||||
var out string
|
||||
return channelCallRPC(s1, "Intention.Apply", &structs.IntentionRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.IntentionOpCreate,
|
||||
Intention: &structs.Intention{
|
||||
// {"default", "*", "default", "baz"}, // shouldn't match
|
||||
SourceNS: "default",
|
||||
SourceName: "*",
|
||||
DestinationNS: "default",
|
||||
DestinationName: fmt.Sprintf(dataPrefix+"%d", i),
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
}, &out, nil)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotFound path", func(t *testing.T) {
|
||||
run(t, "other", 0)
|
||||
})
|
||||
|
||||
// Create some records
|
||||
{
|
||||
insert := [][]string{
|
||||
{"default", "*", "default", "*"},
|
||||
{"default", "*", "default", "bar"},
|
||||
{"default", "*", "default", "baz"}, // shouldn't match
|
||||
}
|
||||
|
||||
for _, v := range insert {
|
||||
var out string
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.Apply", &structs.IntentionRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.IntentionOpCreate,
|
||||
Intention: &structs.Intention{
|
||||
SourceNS: v[0],
|
||||
SourceName: v[1],
|
||||
DestinationNS: v[2],
|
||||
DestinationName: v[3],
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
}, &out))
|
||||
}
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotChanged path", func(t *testing.T) {
|
||||
run(t, "completely-different-other", 2)
|
||||
})
|
||||
}
|
||||
|
||||
// Test matching with ACLs
|
||||
func TestIntentionMatch_acl(t *testing.T) {
|
||||
if testing.Short() {
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
|
@ -162,8 +163,8 @@ func (m *Internal) ServiceTopology(args *structs.ServiceSpecificRequest, reply *
|
|||
if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.srv.blockingQuery(
|
||||
|
@ -210,6 +211,10 @@ func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, repl
|
|||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
priorHash uint64
|
||||
ranOnce bool
|
||||
)
|
||||
return m.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
|
@ -224,6 +229,23 @@ func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, repl
|
|||
|
||||
reply.Index, reply.Services = index, services
|
||||
m.srv.filterACLWithAuthorizer(authz, reply)
|
||||
|
||||
// Generate a hash of the intentions content driving this response.
|
||||
// Use it to determine if the response is identical to a prior
|
||||
// wakeup.
|
||||
newHash, err := hashstructure_v2.Hash(services, hashstructure_v2.FormatV2, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
|
||||
}
|
||||
|
||||
if ranOnce && priorHash == newHash {
|
||||
priorHash = newHash
|
||||
return errNotChanged
|
||||
} else {
|
||||
priorHash = newHash
|
||||
ranOnce = true
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -250,8 +272,8 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl
|
|||
}
|
||||
|
||||
// We need read access to the gateway we're trying to find services for, so check that first.
|
||||
if authz.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = m.srv.blockingQuery(
|
||||
|
@ -334,8 +356,8 @@ func (m *Internal) GatewayIntentions(args *structs.IntentionQueryRequest, reply
|
|||
}
|
||||
|
||||
// We need read access to the gateway we're trying to find intentions for, so check that first.
|
||||
if authz.ServiceRead(args.Match.Entries[0].Name, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.Match.Entries[0].Name, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.srv.blockingQuery(
|
||||
|
@ -406,10 +428,10 @@ func (m *Internal) EventFire(args *structs.EventFireRequest,
|
|||
return err
|
||||
}
|
||||
|
||||
if authz.EventWrite(args.Name, nil) != acl.Allow {
|
||||
if err := authz.ToAllowAuthorizer().EventWriteAllowed(args.Name, nil); err != nil {
|
||||
accessorID := authz.AccessorID()
|
||||
m.logger.Warn("user event blocked by ACLs", "event", args.Name, "accessorID", accessorID)
|
||||
return acl.ErrPermissionDenied
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the query meta data
|
||||
|
@ -442,16 +464,16 @@ func (m *Internal) KeyringOperation(
|
|||
}
|
||||
switch args.Operation {
|
||||
case structs.KeyringList:
|
||||
if authz.KeyringRead(nil) != acl.Allow {
|
||||
return fmt.Errorf("Reading keyring denied by ACLs")
|
||||
if err := authz.ToAllowAuthorizer().KeyringReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
case structs.KeyringInstall:
|
||||
fallthrough
|
||||
case structs.KeyringUse:
|
||||
fallthrough
|
||||
case structs.KeyringRemove:
|
||||
if authz.KeyringWrite(nil) != acl.Allow {
|
||||
return fmt.Errorf("Modifying keyring denied due to ACLs")
|
||||
if err := authz.ToAllowAuthorizer().KeyringWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
panic("Invalid keyring operation")
|
||||
|
|
|
@ -2,6 +2,7 @@ package consul
|
|||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -2317,6 +2318,89 @@ func TestInternal_IntentionUpstreams(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestInternal_IntentionUpstreams_BlockOnNoChange(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.DevMode = true // keep it in ram to make it 10x faster on macos
|
||||
})
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
waitForLeaderEstablishment(t, s1)
|
||||
|
||||
{ // ensure it's default deny to start
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &structs.ConfigEntryRequest{
|
||||
Entry: &structs.ServiceIntentionsConfigEntry{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "*",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "*",
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, &out))
|
||||
require.True(t, out)
|
||||
}
|
||||
|
||||
run := func(t *testing.T, dataPrefix string, expectServices int) {
|
||||
rpcBlockingQueryTestHarness(t,
|
||||
func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error) {
|
||||
args := &structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "web",
|
||||
}
|
||||
args.QueryOptions.MinQueryIndex = minQueryIndex
|
||||
|
||||
var out structs.IndexedServiceList
|
||||
errCh := channelCallRPC(s1, "Internal.IntentionUpstreams", args, &out, func() error {
|
||||
if len(out.Services) != expectServices {
|
||||
return fmt.Errorf("expected %d services got %d", expectServices, len(out.Services))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return &out.QueryMeta, errCh
|
||||
},
|
||||
func(i int) <-chan error {
|
||||
var out string
|
||||
return channelCallRPC(s1, "Intention.Apply", &structs.IntentionRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.IntentionOpCreate,
|
||||
Intention: &structs.Intention{
|
||||
SourceName: fmt.Sprintf(dataPrefix+"-src-%d", i),
|
||||
DestinationName: fmt.Sprintf(dataPrefix+"-dst-%d", i),
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
}, &out, nil)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
runStep(t, "test the errNotFound path", func(t *testing.T) {
|
||||
run(t, "other", 0)
|
||||
})
|
||||
|
||||
// Services:
|
||||
// api and api-proxy on node foo
|
||||
// web and web-proxy on node foo
|
||||
//
|
||||
// Intentions
|
||||
// * -> * (deny) intention
|
||||
// web -> api (allow)
|
||||
registerIntentionUpstreamEntries(t, codec, "")
|
||||
|
||||
runStep(t, "test the errNotChanged path", func(t *testing.T) {
|
||||
run(t, "completely-different-other", 1)
|
||||
})
|
||||
}
|
||||
|
||||
func TestInternal_IntentionUpstreams_ACL(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
|
|
@ -44,8 +44,8 @@ func kvsPreApply(logger hclog.Logger, srv *Server, authz acl.Authorizer, op api.
|
|||
var authzContext acl.AuthorizerContext
|
||||
dirEnt.FillAuthzContext(&authzContext)
|
||||
|
||||
if authz.KeyWritePrefix(dirEnt.Key, &authzContext) != acl.Allow {
|
||||
return false, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().KeyWritePrefixAllowed(dirEnt.Key, &authzContext); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
case api.KVGet, api.KVGetTree:
|
||||
|
@ -58,16 +58,16 @@ func kvsPreApply(logger hclog.Logger, srv *Server, authz acl.Authorizer, op api.
|
|||
var authzContext acl.AuthorizerContext
|
||||
dirEnt.FillAuthzContext(&authzContext)
|
||||
|
||||
if authz.KeyRead(dirEnt.Key, &authzContext) != acl.Allow {
|
||||
return false, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().KeyReadAllowed(dirEnt.Key, &authzContext); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
default:
|
||||
var authzContext acl.AuthorizerContext
|
||||
dirEnt.FillAuthzContext(&authzContext)
|
||||
|
||||
if authz.KeyWrite(dirEnt.Key, &authzContext) != acl.Allow {
|
||||
return false, acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().KeyWriteAllowed(dirEnt.Key, &authzContext); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -155,8 +155,8 @@ func (k *KVS) Get(args *structs.KeyRequest, reply *structs.IndexedDirEntries) er
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.KeyRead(args.Key, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().KeyReadAllowed(args.Key, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ent == nil {
|
||||
|
@ -187,8 +187,10 @@ func (k *KVS) List(args *structs.KeyRequest, reply *structs.IndexedDirEntries) e
|
|||
return err
|
||||
}
|
||||
|
||||
if k.srv.config.ACLEnableKeyListPolicy && authz.KeyList(args.Key, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if k.srv.config.ACLEnableKeyListPolicy {
|
||||
if err := authz.ToAllowAuthorizer().KeyListAllowed(args.Key, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return k.srv.blockingQuery(
|
||||
|
@ -240,8 +242,10 @@ func (k *KVS) ListKeys(args *structs.KeyListRequest, reply *structs.IndexedKeyLi
|
|||
return err
|
||||
}
|
||||
|
||||
if k.srv.config.ACLEnableKeyListPolicy && authz.KeyList(args.Prefix, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if k.srv.config.ACLEnableKeyListPolicy {
|
||||
if err := authz.ToAllowAuthorizer().KeyListAllowed(args.Prefix, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return k.srv.blockingQuery(
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -899,7 +900,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *structs
|
|||
}
|
||||
|
||||
// Check if this node is "known" by serf
|
||||
if _, ok := known[check.Node]; ok {
|
||||
if _, ok := known[strings.ToLower(check.Node)]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -1204,7 +1205,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeE
|
|||
// deregister us later.
|
||||
//
|
||||
// TODO(partitions): check partitions here too? server names should be unique in general though
|
||||
if member.Name == s.config.NodeName {
|
||||
if strings.EqualFold(member.Name, s.config.NodeName) {
|
||||
s.logger.Warn("deregistering self should be done by follower",
|
||||
"name", s.config.NodeName,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
|
|
|
@ -693,7 +693,7 @@ func (c *CAManager) persistNewRootAndConfig(provider ca.Provider, newActiveRoot
|
|||
return fmt.Errorf("local CA not initialized yet")
|
||||
}
|
||||
// Exit early if the change is a no-op.
|
||||
if newActiveRoot == nil && config != nil && config.Provider == storedConfig.Provider && reflect.DeepEqual(config.Config, storedConfig.Config) {
|
||||
if !shouldPersistNewRootAndConfig(newActiveRoot, storedConfig, config) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -758,6 +758,17 @@ func (c *CAManager) persistNewRootAndConfig(provider ca.Provider, newActiveRoot
|
|||
return nil
|
||||
}
|
||||
|
||||
func shouldPersistNewRootAndConfig(newActiveRoot *structs.CARoot, oldConfig, newConfig *structs.CAConfiguration) bool {
|
||||
if newActiveRoot != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if newConfig == nil {
|
||||
return false
|
||||
}
|
||||
return newConfig.Provider == oldConfig.Provider && reflect.DeepEqual(newConfig.Config, oldConfig.Config)
|
||||
}
|
||||
|
||||
func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error) {
|
||||
// Attempt to update the state first.
|
||||
oldState, err := c.setState(caStateReconfig, true)
|
||||
|
|
|
@ -693,6 +693,62 @@ func TestCAManager_Initialize_Vault_WithIntermediateAsPrimaryCA(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestCAManager_Verify_Vault_NoChangeToSecondaryConfig(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
ca.SkipIfVaultNotPresent(t)
|
||||
|
||||
vault := ca.NewTestVaultServer(t)
|
||||
|
||||
_, sDC1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.CAConfig = &structs.CAConfiguration{
|
||||
Provider: "vault",
|
||||
Config: map[string]interface{}{
|
||||
"Address": vault.Addr,
|
||||
"Token": vault.RootToken,
|
||||
"RootPKIPath": "pki-root/",
|
||||
"IntermediatePKIPath": "pki-intermediate/",
|
||||
},
|
||||
}
|
||||
})
|
||||
defer sDC1.Shutdown()
|
||||
testrpc.WaitForActiveCARoot(t, sDC1.RPC, "dc1", nil)
|
||||
|
||||
_, sDC2 := testServerWithConfig(t, func(c *Config) {
|
||||
c.Datacenter = "dc2"
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.CAConfig = &structs.CAConfiguration{
|
||||
Provider: "vault",
|
||||
Config: map[string]interface{}{
|
||||
"Address": vault.Addr,
|
||||
"Token": vault.RootToken,
|
||||
"RootPKIPath": "pki-root/",
|
||||
"IntermediatePKIPath": "pki-intermediate-2/",
|
||||
},
|
||||
}
|
||||
})
|
||||
defer sDC2.Shutdown()
|
||||
joinWAN(t, sDC2, sDC1)
|
||||
testrpc.WaitForActiveCARoot(t, sDC2.RPC, "dc2", nil)
|
||||
|
||||
codec := rpcClient(t, sDC2)
|
||||
var configBefore structs.CAConfiguration
|
||||
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", &structs.DCSpecificRequest{}, &configBefore)
|
||||
require.NoError(t, err)
|
||||
|
||||
renewLeafSigningCert(t, sDC1.caManager, sDC1.caManager.primaryRenewIntermediate)
|
||||
|
||||
// Give the secondary some time to notice the update
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
var configAfter structs.CAConfiguration
|
||||
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", &structs.DCSpecificRequest{}, &configAfter)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.EqualValues(t, configBefore.ModifyIndex, configAfter.ModifyIndex)
|
||||
}
|
||||
|
||||
func getLeafCert(t *testing.T, codec rpc.ClientCodec, trustDomain string, dc string) string {
|
||||
pk, _, err := connect.GeneratePrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -1490,8 +1490,6 @@ func TestCAManager_Initialize_Vault_BadCAConfigDoesNotPreventLeaderEstablishment
|
|||
}
|
||||
|
||||
func TestCAManager_Initialize_BadCAConfigDoesNotPreventLeaderEstablishment(t *testing.T) {
|
||||
ca.SkipIfVaultNotPresent(t)
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.Build = "1.9.1"
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
|
|
|
@ -156,6 +156,9 @@ func TestLeader_FailedMember(t *testing.T) {
|
|||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if len(checks) != 1 {
|
||||
r.Fatalf("client missing check")
|
||||
}
|
||||
if got, want := checks[0].Status, api.HealthCritical; got != want {
|
||||
r.Fatalf("got status %q want %q", got, want)
|
||||
}
|
||||
|
@ -189,12 +192,8 @@ func TestLeader_LeftMember(t *testing.T) {
|
|||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if node == nil {
|
||||
r.Fatal("client not registered")
|
||||
}
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "client not registered")
|
||||
})
|
||||
|
||||
// Node should leave
|
||||
|
@ -204,14 +203,11 @@ func TestLeader_LeftMember(t *testing.T) {
|
|||
// Should be deregistered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if node != nil {
|
||||
r.Fatal("client still registered")
|
||||
}
|
||||
require.NoError(r, err)
|
||||
require.Nil(r, node, "client still registered")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_ReapMember(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -239,12 +235,8 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if node == nil {
|
||||
r.Fatal("client not registered")
|
||||
}
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "client not registered")
|
||||
})
|
||||
|
||||
// Simulate a node reaping
|
||||
|
@ -264,9 +256,7 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
reaped := false
|
||||
for start := time.Now(); time.Since(start) < 5*time.Second; {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
if node == nil {
|
||||
reaped = true
|
||||
break
|
||||
|
@ -277,6 +267,88 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, status serf.MemberStatus, nameFn func(string) string) {
|
||||
t.Parallel()
|
||||
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.ACLsEnabled = true
|
||||
c.ACLInitialManagementToken = "root"
|
||||
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
|
||||
})
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
nodeName := s1.config.NodeName
|
||||
if nameFn != nil {
|
||||
nodeName = nameFn(nodeName)
|
||||
}
|
||||
|
||||
state := s1.fsm.State()
|
||||
|
||||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(nodeName, nil)
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "server not registered")
|
||||
})
|
||||
|
||||
// Simulate THIS node reaping or leaving
|
||||
mems := s1.LANMembersInAgentPartition()
|
||||
var s1mem serf.Member
|
||||
for _, m := range mems {
|
||||
if strings.EqualFold(m.Name, nodeName) {
|
||||
s1mem = m
|
||||
s1mem.Status = status
|
||||
s1mem.Name = nodeName
|
||||
break
|
||||
}
|
||||
}
|
||||
s1.reconcileCh <- s1mem
|
||||
|
||||
// Should NOT be deregistered; we have to poll quickly here because
|
||||
// anti-entropy will put it back if it did get deleted.
|
||||
reaped := false
|
||||
for start := time.Now(); time.Since(start) < 5*time.Second; {
|
||||
_, node, err := state.GetNode(nodeName, nil)
|
||||
require.NoError(t, err)
|
||||
if node == nil {
|
||||
reaped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if reaped {
|
||||
t.Fatalf("server should still be registered")
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("original name", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("left", func(t *testing.T) {
|
||||
run(t, serf.StatusLeft, nil)
|
||||
})
|
||||
t.Run("reap", func(t *testing.T) {
|
||||
run(t, StatusReap, nil)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("uppercased name", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("left", func(t *testing.T) {
|
||||
run(t, serf.StatusLeft, strings.ToUpper)
|
||||
})
|
||||
t.Run("reap", func(t *testing.T) {
|
||||
run(t, StatusReap, strings.ToUpper)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_CheckServersMeta(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -637,6 +709,9 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
|||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if len(checks) != 1 {
|
||||
r.Fatalf("client missing check")
|
||||
}
|
||||
if got, want := checks[0].Status, api.HealthCritical; got != want {
|
||||
r.Fatalf("got state %q want %q", got, want)
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package consul
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
|
@ -38,7 +39,7 @@ func (md *lanMergeDelegate) NotifyMerge(members []*serf.Member) error {
|
|||
nodeID := types.NodeID(rawID)
|
||||
|
||||
// See if there's another node that conflicts with us.
|
||||
if (nodeID == md.nodeID) && (m.Name != md.nodeName) {
|
||||
if (nodeID == md.nodeID) && !strings.EqualFold(m.Name, md.nodeName) {
|
||||
return fmt.Errorf("Member '%s' has conflicting node ID '%s' with this agent's ID",
|
||||
m.Name, nodeID)
|
||||
}
|
||||
|
|
|
@ -58,6 +58,30 @@ func TestMerge_LAN(t *testing.T) {
|
|||
},
|
||||
expect: "wrong datacenter",
|
||||
},
|
||||
"node ID conflict with delegate's ID but same node name with same casing": {
|
||||
members: []*serf.Member{
|
||||
makeTestNode(t, testMember{
|
||||
dc: "dc1",
|
||||
name: "node0",
|
||||
id: thisNodeID,
|
||||
server: true,
|
||||
build: "0.7.5",
|
||||
}),
|
||||
},
|
||||
expect: "",
|
||||
},
|
||||
"node ID conflict with delegate's ID but same node name with different casing": {
|
||||
members: []*serf.Member{
|
||||
makeTestNode(t, testMember{
|
||||
dc: "dc1",
|
||||
name: "NoDe0",
|
||||
id: thisNodeID,
|
||||
server: true,
|
||||
build: "0.7.5",
|
||||
}),
|
||||
},
|
||||
expect: "",
|
||||
},
|
||||
"node ID conflict with delegate's ID": {
|
||||
members: []*serf.Member{
|
||||
makeTestNode(t, testMember{
|
||||
|
|
|
@ -2,11 +2,9 @@ package consul
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
autopilot "github.com/hashicorp/raft-autopilot"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
|
@ -24,8 +22,9 @@ func (op *Operator) AutopilotGetConfiguration(args *structs.DCSpecificRequest, r
|
|||
if err := op.srv.validateEnterpriseToken(authz.Identity()); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessRead)
|
||||
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := op.srv.fsm.State()
|
||||
|
@ -56,8 +55,9 @@ func (op *Operator) AutopilotSetConfiguration(args *structs.AutopilotSetConfigRe
|
|||
if err := op.srv.validateEnterpriseToken(authz.Identity()); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessWrite)
|
||||
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Apply the update
|
||||
|
@ -91,8 +91,9 @@ func (op *Operator) ServerHealth(args *structs.DCSpecificRequest, reply *structs
|
|||
if err := op.srv.validateEnterpriseToken(authz.Identity()); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessRead)
|
||||
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := op.srv.autopilot.GetState()
|
||||
|
@ -158,8 +159,9 @@ func (op *Operator) AutopilotState(args *structs.DCSpecificRequest, reply *autop
|
|||
if err := op.srv.validateEnterpriseToken(authz.Identity()); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return acl.PermissionDeniedByACLUnnamed(authz, nil, acl.ResourceOperator, acl.AccessRead)
|
||||
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := op.srv.autopilot.GetState()
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"github.com/hashicorp/raft"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -23,8 +22,8 @@ func (op *Operator) RaftGetConfiguration(args *structs.DCSpecificRequest, reply
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorReadAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We can't fetch the leader and the configuration atomically with
|
||||
|
@ -88,8 +87,8 @@ func (op *Operator) RaftRemovePeerByAddress(args *structs.RaftRemovePeerRequest,
|
|||
if err := op.srv.validateEnterpriseToken(authz.Identity()); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since this is an operation designed for humans to use, we will return
|
||||
|
@ -141,8 +140,8 @@ func (op *Operator) RaftRemovePeerByID(args *structs.RaftRemovePeerRequest, repl
|
|||
if err := op.srv.validateEnterpriseToken(authz.Identity()); err != nil {
|
||||
return err
|
||||
}
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().OperatorWriteAllowed(nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since this is an operation designed for humans to use, we will return
|
||||
|
|
|
@ -86,9 +86,9 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
|
|||
// need to make sure they have write access for whatever they are
|
||||
// proposing.
|
||||
if prefix, ok := args.Query.GetACLPrefix(); ok {
|
||||
if authz.PreparedQueryWrite(prefix, nil) != acl.Allow {
|
||||
if err := authz.ToAllowAuthorizer().PreparedQueryWriteAllowed(prefix, nil); err != nil {
|
||||
p.logger.Warn("Operation on prepared query denied due to ACLs", "query", args.Query.ID)
|
||||
return acl.ErrPermissionDenied
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -106,9 +106,9 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
|
|||
}
|
||||
|
||||
if prefix, ok := query.GetACLPrefix(); ok {
|
||||
if authz.PreparedQueryWrite(prefix, nil) != acl.Allow {
|
||||
if err := authz.ToAllowAuthorizer().PreparedQueryWriteAllowed(prefix, nil); err != nil {
|
||||
p.logger.Warn("Operation on prepared query denied due to ACLs", "query", args.Query.ID)
|
||||
return acl.ErrPermissionDenied
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -439,7 +439,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
|
|||
// position 0, provided the results are from the same datacenter.
|
||||
if qs.Node != "" && reply.Datacenter == qs.Datacenter {
|
||||
for i, node := range reply.Nodes {
|
||||
if node.Node.Node == qs.Node {
|
||||
if strings.EqualFold(node.Node.Node, qs.Node) {
|
||||
reply.Nodes[0], reply.Nodes[i] = reply.Nodes[i], reply.Nodes[0]
|
||||
break
|
||||
}
|
||||
|
|
|
@ -954,6 +954,19 @@ type blockingQueryResponseMeta interface {
|
|||
// a previous result. errNotFound will never be returned to the caller, it is
|
||||
// converted to nil before returning.
|
||||
//
|
||||
// The query function can return errNotChanged, which is a sentinel error. This
|
||||
// can only be returned on calls AFTER the first call, as it would not be
|
||||
// possible to detect the absence of a change on the first call. Returning
|
||||
// errNotChanged indicates that the query results are identical to the prior
|
||||
// results which allows blockingQuery to keep blocking until the query returns
|
||||
// a real changed result.
|
||||
//
|
||||
// The query function must take care to ensure the actual result of the query
|
||||
// is either left unmodified or explicitly left in a good state before
|
||||
// returning, otherwise when blockingQuery times out it may return an
|
||||
// incomplete or unexpected result. errNotChanged will never be returned to the
|
||||
// caller, it is converted to nil before returning.
|
||||
//
|
||||
// If query function returns any other error, the error is returned to the caller
|
||||
// immediately.
|
||||
//
|
||||
|
@ -993,7 +1006,7 @@ func (s *Server) blockingQuery(
|
|||
var ws memdb.WatchSet
|
||||
err := query(ws, s.fsm.State())
|
||||
s.setQueryMeta(responseMeta, opts.GetToken())
|
||||
if errors.Is(err, errNotFound) {
|
||||
if errors.Is(err, errNotFound) || errors.Is(err, errNotChanged) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
|
@ -1008,7 +1021,10 @@ func (s *Server) blockingQuery(
|
|||
// decrement the count when the function returns.
|
||||
defer atomic.AddUint64(&s.queriesBlocking, ^uint64(0))
|
||||
|
||||
var notFound bool
|
||||
var (
|
||||
notFound bool
|
||||
ranOnce bool
|
||||
)
|
||||
|
||||
for {
|
||||
if opts.GetRequireConsistent() {
|
||||
|
@ -1029,17 +1045,23 @@ func (s *Server) blockingQuery(
|
|||
|
||||
err := query(ws, state)
|
||||
s.setQueryMeta(responseMeta, opts.GetToken())
|
||||
|
||||
switch {
|
||||
case errors.Is(err, errNotFound):
|
||||
if notFound {
|
||||
// query result has not changed
|
||||
minQueryIndex = responseMeta.GetIndex()
|
||||
}
|
||||
|
||||
notFound = true
|
||||
case errors.Is(err, errNotChanged):
|
||||
if ranOnce {
|
||||
// query result has not changed
|
||||
minQueryIndex = responseMeta.GetIndex()
|
||||
}
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
ranOnce = true
|
||||
|
||||
if responseMeta.GetIndex() > minQueryIndex {
|
||||
return nil
|
||||
|
@ -1060,7 +1082,10 @@ func (s *Server) blockingQuery(
|
|||
}
|
||||
}
|
||||
|
||||
var errNotFound = fmt.Errorf("no data found for query")
|
||||
var (
|
||||
errNotFound = fmt.Errorf("no data found for query")
|
||||
errNotChanged = fmt.Errorf("data did not change for query")
|
||||
)
|
||||
|
||||
// setQueryMeta is used to populate the QueryMeta data for an RPC call
|
||||
//
|
||||
|
|
|
@ -1681,3 +1681,104 @@ func getFirstSubscribeEventOrError(conn *grpc.ClientConn, req *pbsubscribe.Subsc
|
|||
}
|
||||
return event, nil
|
||||
}
|
||||
|
||||
// channelCallRPC lets you execute an RPC async. Helpful in some
|
||||
// tests.
|
||||
func channelCallRPC(
|
||||
srv *Server,
|
||||
method string,
|
||||
args interface{},
|
||||
resp interface{},
|
||||
responseInterceptor func() error,
|
||||
) <-chan error {
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
codec, err := rpcClientNoClose(srv)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
defer codec.Close()
|
||||
|
||||
err = msgpackrpc.CallWithCodec(codec, method, args, resp)
|
||||
if err == nil && responseInterceptor != nil {
|
||||
err = responseInterceptor()
|
||||
}
|
||||
errCh <- err
|
||||
}()
|
||||
return errCh
|
||||
}
|
||||
|
||||
// rpcBlockingQueryTestHarness is specifically meant to test the
|
||||
// errNotFound and errNotChanged mechanisms in blockingQuery()
|
||||
func rpcBlockingQueryTestHarness(
|
||||
t *testing.T,
|
||||
readQueryFn func(minQueryIndex uint64) (*structs.QueryMeta, <-chan error),
|
||||
noisyWriteFn func(i int) <-chan error,
|
||||
) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
launchWriters := func() {
|
||||
defer cancel()
|
||||
|
||||
for i := 0; i < 200; i++ {
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
errCh := noisyWriteFn(i)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
t.Errorf("[%d] unexpected error: %w", i, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
count int
|
||||
minQueryIndex uint64
|
||||
)
|
||||
|
||||
for ctx.Err() == nil {
|
||||
// The first iteration is an orientation iteration, as we don't pass an
|
||||
// index value so there is no actual blocking that will happen.
|
||||
//
|
||||
// Since the data is not changing, we don't expect the second iteration
|
||||
// to return soon, so we wait a bit after kicking it off before
|
||||
// launching the write-storm.
|
||||
var timerCh <-chan time.Time
|
||||
if count == 1 {
|
||||
timerCh = time.After(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
qm, errCh := readQueryFn(minQueryIndex)
|
||||
|
||||
RESUME:
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
t.Log("blocking query index", qm.Index)
|
||||
count++
|
||||
minQueryIndex = qm.Index
|
||||
|
||||
case <-timerCh:
|
||||
timerCh = nil
|
||||
go launchWriters()
|
||||
goto RESUME
|
||||
|
||||
case <-ctx.Done():
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, 1, count, "if this fails, then the timer likely needs to be increased above")
|
||||
}
|
||||
|
|
|
@ -822,7 +822,7 @@ func (s *Server) setupRaft() error {
|
|||
|
||||
// If we are in bootstrap or dev mode and the state is clean then we can
|
||||
// bootstrap now.
|
||||
if s.config.Bootstrap || s.config.DevMode {
|
||||
if (s.config.Bootstrap || s.config.DevMode) && !s.config.ReadReplica {
|
||||
hasState, err := raft.HasExistingState(log, stable, snap)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -5,6 +5,7 @@ package consul
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
|
@ -138,10 +139,11 @@ func (s *Server) reconcile() (err error) {
|
|||
members := s.serfLAN.Members()
|
||||
knownMembers := make(map[string]struct{})
|
||||
for _, member := range members {
|
||||
memberName := strings.ToLower(member.Name)
|
||||
if err := s.reconcileMember(member); err != nil {
|
||||
return err
|
||||
}
|
||||
knownMembers[member.Name] = struct{}{}
|
||||
knownMembers[memberName] = struct{}{}
|
||||
}
|
||||
|
||||
// Reconcile any members that have been reaped while we were not the
|
||||
|
|
|
@ -386,6 +386,11 @@ func (s *Server) maybeBootstrap() {
|
|||
return
|
||||
}
|
||||
|
||||
if s.config.ReadReplica {
|
||||
s.logger.Info("Read replicas cannot bootstrap raft")
|
||||
return
|
||||
}
|
||||
|
||||
// Scan for all the known servers.
|
||||
members := s.serfLAN.Members()
|
||||
var servers []metadata.Server
|
||||
|
|
|
@ -82,13 +82,13 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
|
|||
if existing == nil {
|
||||
return nil
|
||||
}
|
||||
if authz.SessionWrite(existing.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().SessionWriteAllowed(existing.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case structs.SessionCreate:
|
||||
if authz.SessionWrite(args.Session.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().SessionWriteAllowed(args.Session.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
|
@ -303,8 +303,8 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest,
|
|||
return nil
|
||||
}
|
||||
|
||||
if authz.SessionWrite(session.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().SessionWriteAllowed(session.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reset the session TTL timer.
|
||||
|
|
|
@ -18,7 +18,6 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/snapshot"
|
||||
|
@ -62,8 +61,8 @@ func (s *Server) dispatchSnapshotRequest(args *structs.SnapshotRequest, in io.Re
|
|||
// all the ACLs and you could escalate from there.
|
||||
if authz, err := s.ResolveToken(args.Token); err != nil {
|
||||
return nil, err
|
||||
} else if authz.Snapshot(nil) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
} else if err := authz.ToAllowAuthorizer().SnapshotAllowed(nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Dispatch the operation.
|
||||
|
|
|
@ -136,7 +136,7 @@ func (s *Store) ensureCheckIfNodeMatches(
|
|||
nodePartition string,
|
||||
check *structs.HealthCheck,
|
||||
) error {
|
||||
if check.Node != node || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) {
|
||||
if !strings.EqualFold(check.Node, node) || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) {
|
||||
return fmt.Errorf("check node %q does not match node %q",
|
||||
printNodeName(check.Node, check.PartitionOrDefault()),
|
||||
printNodeName(node, nodePartition),
|
||||
|
@ -330,7 +330,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
|
|||
}
|
||||
if existing != nil {
|
||||
n = existing
|
||||
if n.Node != node.Node {
|
||||
if !strings.EqualFold(n.Node, node.Node) {
|
||||
// Lets first get all nodes and check whether name do match, we do not allow clash on nodes without ID
|
||||
dupNameError := ensureNoNodeWithSimilarNameTxn(tx, node, false)
|
||||
if dupNameError != nil {
|
||||
|
|
|
@ -105,7 +105,7 @@ type nodeServiceTuple struct {
|
|||
|
||||
func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple {
|
||||
return nodeServiceTuple{
|
||||
Node: sn.Node,
|
||||
Node: strings.ToLower(sn.Node),
|
||||
ServiceID: sn.ServiceID,
|
||||
EntMeta: sn.EnterpriseMeta,
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTupl
|
|||
|
||||
func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServiceTuple {
|
||||
return nodeServiceTuple{
|
||||
Node: hc.Node,
|
||||
Node: strings.ToLower(hc.Node),
|
||||
ServiceID: hc.ServiceID,
|
||||
EntMeta: hc.EnterpriseMeta,
|
||||
}
|
||||
|
|
|
@ -3,22 +3,29 @@
|
|||
|
||||
package state
|
||||
|
||||
import "github.com/hashicorp/consul/agent/structs"
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func (nst nodeServiceTuple) nodeTuple() nodeTuple {
|
||||
return nodeTuple{Node: nst.Node, Partition: ""}
|
||||
return nodeTuple{
|
||||
Node: strings.ToLower(nst.Node),
|
||||
Partition: "",
|
||||
}
|
||||
}
|
||||
|
||||
func newNodeTupleFromNode(node *structs.Node) nodeTuple {
|
||||
return nodeTuple{
|
||||
Node: node.Node,
|
||||
Node: strings.ToLower(node.Node),
|
||||
Partition: "",
|
||||
}
|
||||
}
|
||||
|
||||
func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple {
|
||||
return nodeTuple{
|
||||
Node: hc.Node,
|
||||
Node: strings.ToLower(hc.Node),
|
||||
Partition: "",
|
||||
}
|
||||
}
|
||||
|
|
|
@ -471,8 +471,11 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
|
|||
}
|
||||
|
||||
// Add in a top-level check.
|
||||
//
|
||||
// Verify that node name references in checks are case-insensitive during
|
||||
// restore.
|
||||
req.Check = &structs.HealthCheck{
|
||||
Node: nodeName,
|
||||
Node: strings.ToUpper(nodeName),
|
||||
CheckID: "check1",
|
||||
Name: "check",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -499,7 +502,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
|
|||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
c := out[0]
|
||||
if c.Node != nodeName || c.CheckID != "check1" || c.Name != "check" ||
|
||||
if c.Node != strings.ToUpper(nodeName) || c.CheckID != "check1" || c.Name != "check" ||
|
||||
c.CreateIndex != 3 || c.ModifyIndex != 3 {
|
||||
t.Fatalf("bad check returned: %#v", c)
|
||||
}
|
||||
|
@ -545,7 +548,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
|
|||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
c1 := out[0]
|
||||
if c1.Node != nodeName || c1.CheckID != "check1" || c1.Name != "check" ||
|
||||
if c1.Node != strings.ToUpper(nodeName) || c1.CheckID != "check1" || c1.Name != "check" ||
|
||||
c1.CreateIndex != 3 || c1.ModifyIndex != 3 {
|
||||
t.Fatalf("bad check returned, should not be modified: %#v", c1)
|
||||
}
|
||||
|
|
|
@ -431,7 +431,7 @@ func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, serv
|
|||
EvaluateInPartition: source.PartitionOrDefault(),
|
||||
EvaluateInDatacenter: dc,
|
||||
}
|
||||
idx, chain, err := s.serviceDiscoveryChainTxn(tx, ws, source.Name, entMeta, req)
|
||||
idx, chain, _, err := s.serviceDiscoveryChainTxn(tx, ws, source.Name, entMeta, req)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to fetch discovery chain for %q: %v", source.String(), err)
|
||||
}
|
||||
|
@ -488,7 +488,7 @@ func (s *Store) discoveryChainSourcesTxn(tx ReadTxn, ws memdb.WatchSet, dc strin
|
|||
EvaluateInPartition: sn.PartitionOrDefault(),
|
||||
EvaluateInDatacenter: dc,
|
||||
}
|
||||
idx, chain, err := s.serviceDiscoveryChainTxn(tx, ws, sn.Name, &sn.EnterpriseMeta, req)
|
||||
idx, chain, _, err := s.serviceDiscoveryChainTxn(tx, ws, sn.Name, &sn.EnterpriseMeta, req)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to fetch discovery chain for %q: %v", sn.String(), err)
|
||||
}
|
||||
|
@ -772,7 +772,7 @@ func (s *Store) ServiceDiscoveryChain(
|
|||
serviceName string,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
req discoverychain.CompileRequest,
|
||||
) (uint64, *structs.CompiledDiscoveryChain, error) {
|
||||
) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
|
@ -785,19 +785,19 @@ func (s *Store) serviceDiscoveryChainTxn(
|
|||
serviceName string,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
req discoverychain.CompileRequest,
|
||||
) (uint64, *structs.CompiledDiscoveryChain, error) {
|
||||
) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) {
|
||||
|
||||
index, entries, err := readDiscoveryChainConfigEntriesTxn(tx, ws, serviceName, nil, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
return 0, nil, nil, err
|
||||
}
|
||||
req.Entries = entries
|
||||
|
||||
_, config, err := s.CAConfig(ws)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
return 0, nil, nil, err
|
||||
} else if config == nil {
|
||||
return 0, nil, errors.New("no cluster ca config setup")
|
||||
return 0, nil, nil, errors.New("no cluster ca config setup")
|
||||
}
|
||||
|
||||
// Build TrustDomain based on the ClusterID stored.
|
||||
|
@ -805,17 +805,131 @@ func (s *Store) serviceDiscoveryChainTxn(
|
|||
if signingID == nil {
|
||||
// If CA is bootstrapped at all then this should never happen but be
|
||||
// defensive.
|
||||
return 0, nil, errors.New("no cluster trust domain setup")
|
||||
return 0, nil, nil, errors.New("no cluster trust domain setup")
|
||||
}
|
||||
req.EvaluateInTrustDomain = signingID.Host()
|
||||
|
||||
// Then we compile it into something useful.
|
||||
chain, err := discoverychain.Compile(req)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to compile discovery chain: %v", err)
|
||||
return 0, nil, nil, fmt.Errorf("failed to compile discovery chain: %v", err)
|
||||
}
|
||||
|
||||
return index, chain, nil
|
||||
return index, chain, entries, nil
|
||||
}
|
||||
|
||||
func (s *Store) ReadResolvedServiceConfigEntries(
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
upstreamIDs []structs.ServiceID,
|
||||
proxyMode structs.ProxyMode,
|
||||
) (uint64, *configentry.ResolvedServiceConfigSet, error) {
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
var res configentry.ResolvedServiceConfigSet
|
||||
|
||||
// The caller will likely calculate this again, but we need to do it here
|
||||
// to determine if we are going to traverse into implicit upstream
|
||||
// definitions.
|
||||
var inferredProxyMode structs.ProxyMode
|
||||
|
||||
index, proxyEntry, err := configEntryTxn(tx, ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
maxIndex := index
|
||||
|
||||
if proxyEntry != nil {
|
||||
var ok bool
|
||||
proxyConf, ok := proxyEntry.(*structs.ProxyConfigEntry)
|
||||
if !ok {
|
||||
return 0, nil, fmt.Errorf("invalid proxy config type %T", proxyEntry)
|
||||
}
|
||||
res.AddProxyDefaults(proxyConf)
|
||||
|
||||
inferredProxyMode = proxyConf.Mode
|
||||
}
|
||||
|
||||
index, serviceEntry, err := configEntryTxn(tx, ws, structs.ServiceDefaults, serviceName, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
if index > maxIndex {
|
||||
maxIndex = index
|
||||
}
|
||||
|
||||
var serviceConf *structs.ServiceConfigEntry
|
||||
if serviceEntry != nil {
|
||||
var ok bool
|
||||
serviceConf, ok = serviceEntry.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return 0, nil, fmt.Errorf("invalid service config type %T", serviceEntry)
|
||||
}
|
||||
res.AddServiceDefaults(serviceConf)
|
||||
|
||||
if serviceConf.Mode != structs.ProxyModeDefault {
|
||||
inferredProxyMode = serviceConf.Mode
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
noUpstreamArgs = len(upstreamIDs) == 0
|
||||
|
||||
// Check the args and the resolved value. If it was exclusively set via a config entry, then proxyMode
|
||||
// will never be transparent because the service config request does not use the resolved value.
|
||||
tproxy = proxyMode == structs.ProxyModeTransparent || inferredProxyMode == structs.ProxyModeTransparent
|
||||
)
|
||||
|
||||
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
|
||||
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
|
||||
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
|
||||
if noUpstreamArgs && !tproxy {
|
||||
return maxIndex, &res, nil
|
||||
}
|
||||
|
||||
// First collect all upstreams into a set of seen upstreams.
|
||||
// Upstreams can come from:
|
||||
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
|
||||
// - Implicitly from centralized upstream config in service-defaults
|
||||
seenUpstreams := map[structs.ServiceID]struct{}{}
|
||||
|
||||
for _, sid := range upstreamIDs {
|
||||
if _, ok := seenUpstreams[sid]; !ok {
|
||||
seenUpstreams[sid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if serviceConf != nil && serviceConf.UpstreamConfig != nil {
|
||||
for _, override := range serviceConf.UpstreamConfig.Overrides {
|
||||
if override.Name == "" {
|
||||
continue // skip this impossible condition
|
||||
}
|
||||
seenUpstreams[override.ServiceID()] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for upstream := range seenUpstreams {
|
||||
index, rawEntry, err := configEntryTxn(tx, ws, structs.ServiceDefaults, upstream.ID, &upstream.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if index > maxIndex {
|
||||
maxIndex = index
|
||||
}
|
||||
|
||||
if rawEntry != nil {
|
||||
entry, ok := rawEntry.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return 0, nil, fmt.Errorf("invalid service config type %T", rawEntry)
|
||||
}
|
||||
res.AddServiceDefaults(entry)
|
||||
}
|
||||
}
|
||||
|
||||
return maxIndex, &res, nil
|
||||
}
|
||||
|
||||
// ReadDiscoveryChainConfigEntries will query for the full discovery chain for
|
||||
|
|
|
@ -17,17 +17,25 @@ import (
|
|||
)
|
||||
|
||||
func rpcClient(t *testing.T, s *Server) rpc.ClientCodec {
|
||||
codec, err := rpcClientNoClose(s)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { codec.Close() })
|
||||
return codec
|
||||
}
|
||||
|
||||
func rpcClientNoClose(s *Server) (rpc.ClientCodec, error) {
|
||||
addr := s.config.RPCAdvertise
|
||||
conn, err := net.DialTimeout("tcp", addr.String(), time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Write the Consul RPC byte to set the mode
|
||||
conn.Write([]byte{byte(pool.RPCConsul)})
|
||||
codec := msgpackrpc.NewCodecFromHandle(true, true, conn, structs.MsgpackHandle)
|
||||
t.Cleanup(func() { codec.Close() })
|
||||
return codec
|
||||
return codec, nil
|
||||
}
|
||||
|
||||
func insecureRPCClient(s *Server, c tlsutil.Config) (rpc.ClientCodec, error) {
|
||||
|
|
|
@ -113,8 +113,8 @@ func vetNodeTxnOp(op *structs.TxnNodeOp, authz acl.Authorizer) error {
|
|||
var authzContext acl.AuthorizerContext
|
||||
op.FillAuthzContext(&authzContext)
|
||||
|
||||
if authz.NodeWrite(op.Node.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(op.Node.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -126,13 +126,13 @@ func vetCheckTxnOp(op *structs.TxnCheckOp, authz acl.Authorizer) error {
|
|||
|
||||
if op.Check.ServiceID == "" {
|
||||
// Node-level check.
|
||||
if authz.NodeWrite(op.Check.Node, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().NodeWriteAllowed(op.Check.Node, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Service-level check.
|
||||
if authz.ServiceWrite(op.Check.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.ErrPermissionDenied
|
||||
if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(op.Check.ServiceName, &authzContext); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -554,57 +554,64 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
}
|
||||
|
||||
// Verify the transaction's return value.
|
||||
var expected structs.TxnResponse
|
||||
var outPos int
|
||||
for i, op := range arg.Ops {
|
||||
err := out.Errors[outPos]
|
||||
switch {
|
||||
case op.KV != nil:
|
||||
switch op.KV.Verb {
|
||||
case api.KVGet, api.KVGetTree:
|
||||
// These get filtered but won't result in an error.
|
||||
|
||||
case api.KVSet, api.KVDelete, api.KVDeleteCAS, api.KVDeleteTree, api.KVCAS, api.KVLock, api.KVUnlock, api.KVCheckNotExists:
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceKey, acl.AccessWrite, "nope")
|
||||
outPos++
|
||||
default:
|
||||
expected.Errors = append(expected.Errors, &structs.TxnError{
|
||||
OpIndex: i,
|
||||
What: acl.ErrPermissionDenied.Error(),
|
||||
})
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceKey, acl.AccessRead, "nope")
|
||||
outPos++
|
||||
}
|
||||
case op.Node != nil:
|
||||
switch op.Node.Verb {
|
||||
case api.NodeGet:
|
||||
// These get filtered but won't result in an error.
|
||||
|
||||
case api.NodeSet, api.NodeDelete, api.NodeDeleteCAS, api.NodeCAS:
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceNode, acl.AccessWrite, "nope")
|
||||
outPos++
|
||||
default:
|
||||
expected.Errors = append(expected.Errors, &structs.TxnError{
|
||||
OpIndex: i,
|
||||
What: acl.ErrPermissionDenied.Error(),
|
||||
})
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceNode, acl.AccessRead, "nope")
|
||||
outPos++
|
||||
}
|
||||
case op.Service != nil:
|
||||
switch op.Service.Verb {
|
||||
case api.ServiceGet:
|
||||
// These get filtered but won't result in an error.
|
||||
|
||||
case api.ServiceSet, api.ServiceCAS, api.ServiceDelete, api.ServiceDeleteCAS:
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceService, acl.AccessWrite, "nope")
|
||||
outPos++
|
||||
default:
|
||||
expected.Errors = append(expected.Errors, &structs.TxnError{
|
||||
OpIndex: i,
|
||||
What: acl.ErrPermissionDenied.Error(),
|
||||
})
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceService, acl.AccessRead, "nope")
|
||||
outPos++
|
||||
}
|
||||
case op.Check != nil:
|
||||
switch op.Check.Verb {
|
||||
case api.CheckGet:
|
||||
// These get filtered but won't result in an error.
|
||||
|
||||
case api.CheckSet, api.CheckCAS, api.CheckDelete, api.CheckDeleteCAS:
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceNode, acl.AccessWrite, "nope")
|
||||
outPos++
|
||||
default:
|
||||
expected.Errors = append(expected.Errors, &structs.TxnError{
|
||||
OpIndex: i,
|
||||
What: acl.ErrPermissionDenied.Error(),
|
||||
})
|
||||
require.Equal(t, err.OpIndex, i)
|
||||
acl.RequirePermissionDeniedMessage(t, err.What, nil, nil, acl.ResourceNode, acl.AccessRead, "nope")
|
||||
outPos++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, expected, out)
|
||||
}
|
||||
|
||||
func TestTxn_Apply_LockDelay(t *testing.T) {
|
||||
|
@ -927,10 +934,9 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
var out structs.TxnReadResponse
|
||||
err := msgpackrpc.CallWithCodec(codec, "Txn.Read", &arg, &out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, structs.TxnErrors{
|
||||
{OpIndex: 0, What: acl.ErrPermissionDenied.Error()},
|
||||
{OpIndex: 1, What: acl.ErrPermissionDenied.Error()},
|
||||
}, out.Errors)
|
||||
acl.RequirePermissionDeniedMessage(t, out.Errors[0].What, nil, nil, acl.ResourceKey, acl.AccessRead, "nope")
|
||||
acl.RequirePermissionDeniedMessage(t, out.Errors[1].What, nil, nil, acl.ResourceKey, acl.AccessRead, "nope")
|
||||
|
||||
require.Empty(t, out.Results)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package consul
|
|||
import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
@ -161,7 +162,7 @@ func (c *Client) CheckServers(datacenter string, fn func(*metadata.Server) bool)
|
|||
|
||||
func isSerfMember(s *serf.Serf, nodeName string) bool {
|
||||
for _, m := range s.Members() {
|
||||
if m.Name == nodeName {
|
||||
if strings.EqualFold(m.Name, nodeName) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
|
@ -273,6 +273,7 @@ func (s *HTTPHandlers) handler(enableDebug bool) http.Handler {
|
|||
// If the token provided does not have the necessary permissions,
|
||||
// write a forbidden response
|
||||
// TODO(partitions): should this be possible in a partition?
|
||||
// TODO(acl-error-enhancements): We should return error details somehow here.
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
resp.WriteHeader(http.StatusForbidden)
|
||||
return
|
||||
|
|
|
@ -233,9 +233,6 @@ func decodeStringKey(key string) ([]byte, error) {
|
|||
func (a *Agent) keyringProcess(args *structs.KeyringRequest) (*structs.KeyringResponses, error) {
|
||||
var reply structs.KeyringResponses
|
||||
|
||||
if _, ok := a.delegate.(*consul.Server); !ok {
|
||||
return nil, fmt.Errorf("keyring operations must run against a server node")
|
||||
}
|
||||
if err := a.RPC("Internal.KeyringOperation", args, &reply); err != nil {
|
||||
return &reply, err
|
||||
}
|
||||
|
|
|
@ -152,10 +152,26 @@ func newState(ns *structs.NodeService, token string, config stateConfig) (*state
|
|||
return nil, err
|
||||
}
|
||||
|
||||
handler, err := newKindHandler(config, s, ch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &state{
|
||||
logger: config.logger.With("proxy", s.proxyID, "kind", s.kind),
|
||||
serviceInstance: s,
|
||||
handler: handler,
|
||||
ch: ch,
|
||||
snapCh: make(chan ConfigSnapshot, 1),
|
||||
reqCh: make(chan chan *ConfigSnapshot, 1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newKindHandler(config stateConfig, s serviceInstance, ch chan cache.UpdateEvent) (kindHandler, error) {
|
||||
var handler kindHandler
|
||||
h := handlerState{stateConfig: config, serviceInstance: s, ch: ch}
|
||||
|
||||
switch ns.Kind {
|
||||
switch s.kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
handler = &handlerConnectProxy{handlerState: h}
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
|
@ -170,14 +186,7 @@ func newState(ns *structs.NodeService, token string, config stateConfig) (*state
|
|||
return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, or ingress-gateway")
|
||||
}
|
||||
|
||||
return &state{
|
||||
logger: config.logger.With("proxy", s.proxyID, "kind", s.kind),
|
||||
serviceInstance: s,
|
||||
handler: handler,
|
||||
ch: ch,
|
||||
snapCh: make(chan ConfigSnapshot, 1),
|
||||
reqCh: make(chan chan *ConfigSnapshot, 1),
|
||||
}, nil
|
||||
return handler, nil
|
||||
}
|
||||
|
||||
func newServiceInstanceFromNodeService(ns *structs.NodeService, token string) (serviceInstance, error) {
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,271 @@
|
|||
package proxycfg
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/go-testing-interface"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/consul/discoverychain"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// TestConfigSnapshot returns a fully populated snapshot
|
||||
func TestConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService), extraUpdates []cache.UpdateEvent) *ConfigSnapshot {
|
||||
roots, leaf := TestCerts(t)
|
||||
|
||||
// no entries implies we'll get a default chain
|
||||
dbChain := discoverychain.TestCompileConfigEntries(t, "db", "default", "default", "dc1", connect.TestClusterID+".consul", nil)
|
||||
assert.True(t, dbChain.IsDefault())
|
||||
|
||||
var (
|
||||
upstreams = structs.TestUpstreams(t)
|
||||
dbUpstream = upstreams[0]
|
||||
geoUpstream = upstreams[1]
|
||||
|
||||
dbUID = NewUpstreamID(&dbUpstream)
|
||||
geoUID = NewUpstreamID(&geoUpstream)
|
||||
|
||||
webSN = structs.ServiceIDString("web", nil)
|
||||
)
|
||||
|
||||
baseEvents := []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: rootsWatchID,
|
||||
Result: roots,
|
||||
},
|
||||
{
|
||||
CorrelationID: leafWatchID,
|
||||
Result: leaf,
|
||||
},
|
||||
{
|
||||
CorrelationID: intentionsWatchID,
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil, // no intentions defined
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: svcChecksWatchIDPrefix + webSN,
|
||||
Result: []structs.CheckType{},
|
||||
},
|
||||
{
|
||||
CorrelationID: "upstream:" + geoUID.String(),
|
||||
Result: &structs.PreparedQueryExecuteResponse{
|
||||
Nodes: TestPreparedQueryNodes(t, "geo-cache"),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: "discovery-chain:" + dbUID.String(),
|
||||
Result: &structs.DiscoveryChainResponse{
|
||||
Chain: dbChain,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: "upstream-target:" + dbChain.ID() + ":" + dbUID.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: TestUpstreamNodes(t, "db"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return testConfigSnapshotFixture(t, &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
Service: "web-sidecar-proxy",
|
||||
Port: 9999,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceID: "web",
|
||||
DestinationServiceName: "web",
|
||||
LocalServiceAddress: "127.0.0.1",
|
||||
LocalServicePort: 8080,
|
||||
Config: map[string]interface{}{
|
||||
"foo": "bar",
|
||||
},
|
||||
Upstreams: upstreams,
|
||||
},
|
||||
Meta: nil,
|
||||
TaggedAddresses: nil,
|
||||
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
|
||||
}
|
||||
|
||||
// TestConfigSnapshotDiscoveryChain returns a fully populated snapshot using a discovery chain
|
||||
func TestConfigSnapshotDiscoveryChain(
|
||||
t testing.T,
|
||||
variation string,
|
||||
nsFn func(ns *structs.NodeService),
|
||||
extraUpdates []cache.UpdateEvent,
|
||||
additionalEntries ...structs.ConfigEntry,
|
||||
) *ConfigSnapshot {
|
||||
roots, leaf := TestCerts(t)
|
||||
|
||||
var (
|
||||
upstreams = structs.TestUpstreams(t)
|
||||
geoUpstream = upstreams[1]
|
||||
|
||||
geoUID = NewUpstreamID(&geoUpstream)
|
||||
|
||||
webSN = structs.ServiceIDString("web", nil)
|
||||
)
|
||||
|
||||
baseEvents := testSpliceEvents([]cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: rootsWatchID,
|
||||
Result: roots,
|
||||
},
|
||||
{
|
||||
CorrelationID: leafWatchID,
|
||||
Result: leaf,
|
||||
},
|
||||
{
|
||||
CorrelationID: intentionsWatchID,
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil, // no intentions defined
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: svcChecksWatchIDPrefix + webSN,
|
||||
Result: []structs.CheckType{},
|
||||
},
|
||||
{
|
||||
CorrelationID: "upstream:" + geoUID.String(),
|
||||
Result: &structs.PreparedQueryExecuteResponse{
|
||||
Nodes: TestPreparedQueryNodes(t, "geo-cache"),
|
||||
},
|
||||
},
|
||||
}, setupTestVariationConfigEntriesAndSnapshot(
|
||||
t, variation, upstreams, additionalEntries...,
|
||||
))
|
||||
|
||||
return testConfigSnapshotFixture(t, &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
Service: "web-sidecar-proxy",
|
||||
Port: 9999,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceID: "web",
|
||||
DestinationServiceName: "web",
|
||||
LocalServiceAddress: "127.0.0.1",
|
||||
LocalServicePort: 8080,
|
||||
Config: map[string]interface{}{
|
||||
"foo": "bar",
|
||||
},
|
||||
Upstreams: upstreams,
|
||||
},
|
||||
Meta: nil,
|
||||
TaggedAddresses: nil,
|
||||
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
|
||||
}
|
||||
|
||||
func TestConfigSnapshotExposeConfig(t testing.T, nsFn func(ns *structs.NodeService)) *ConfigSnapshot {
|
||||
roots, leaf := TestCerts(t)
|
||||
|
||||
var (
|
||||
webSN = structs.ServiceIDString("web", nil)
|
||||
)
|
||||
|
||||
baseEvents := []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: rootsWatchID,
|
||||
Result: roots,
|
||||
},
|
||||
{
|
||||
CorrelationID: leafWatchID, Result: leaf,
|
||||
},
|
||||
{
|
||||
CorrelationID: intentionsWatchID,
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil, // no intentions defined
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: svcChecksWatchIDPrefix + webSN,
|
||||
Result: []structs.CheckType{},
|
||||
},
|
||||
}
|
||||
|
||||
return testConfigSnapshotFixture(t, &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
Service: "web-sidecar-proxy",
|
||||
Address: "1.2.3.4",
|
||||
Port: 8080,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceID: "web",
|
||||
DestinationServiceName: "web",
|
||||
LocalServicePort: 8080,
|
||||
Expose: structs.ExposeConfig{
|
||||
Checks: false,
|
||||
Paths: []structs.ExposePath{
|
||||
{
|
||||
LocalPathPort: 8080,
|
||||
Path: "/health1",
|
||||
ListenerPort: 21500,
|
||||
},
|
||||
{
|
||||
LocalPathPort: 8080,
|
||||
Path: "/health2",
|
||||
ListenerPort: 21501,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Meta: nil,
|
||||
TaggedAddresses: nil,
|
||||
}, nsFn, nil, baseEvents)
|
||||
}
|
||||
|
||||
func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot {
|
||||
roots, leaf := TestCerts(t)
|
||||
|
||||
return testConfigSnapshotFixture(t, &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
Service: "grpc-proxy",
|
||||
Address: "1.2.3.4",
|
||||
Port: 8080,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceName: "grpc",
|
||||
DestinationServiceID: "grpc",
|
||||
LocalServicePort: 8080,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
Expose: structs.ExposeConfig{
|
||||
Checks: false,
|
||||
Paths: []structs.ExposePath{
|
||||
{
|
||||
LocalPathPort: 8090,
|
||||
Path: "/healthz",
|
||||
ListenerPort: 21500,
|
||||
Protocol: "http",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Meta: nil,
|
||||
TaggedAddresses: nil,
|
||||
}, nil, nil, []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: rootsWatchID,
|
||||
Result: roots,
|
||||
},
|
||||
{
|
||||
CorrelationID: leafWatchID,
|
||||
Result: leaf,
|
||||
},
|
||||
{
|
||||
CorrelationID: intentionsWatchID,
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil, // no intentions defined
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("grpc", nil),
|
||||
Result: []structs.CheckType{},
|
||||
},
|
||||
})
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,442 @@
|
|||
package proxycfg
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-testing-interface"
|
||||
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *structs.NodeService), extraUpdates []cache.UpdateEvent) *ConfigSnapshot {
|
||||
roots, _ := TestCerts(t)
|
||||
|
||||
var (
|
||||
populateServices = true
|
||||
useFederationStates = false
|
||||
deleteCrossDCEntry = false
|
||||
)
|
||||
|
||||
switch variant {
|
||||
case "default":
|
||||
case "federation-states":
|
||||
populateServices = true
|
||||
useFederationStates = true
|
||||
deleteCrossDCEntry = true
|
||||
case "newer-info-in-federation-states":
|
||||
populateServices = true
|
||||
useFederationStates = true
|
||||
deleteCrossDCEntry = false
|
||||
case "older-info-in-federation-states":
|
||||
populateServices = true
|
||||
useFederationStates = true
|
||||
deleteCrossDCEntry = false
|
||||
case "no-services":
|
||||
populateServices = false
|
||||
useFederationStates = false
|
||||
deleteCrossDCEntry = false
|
||||
case "service-subsets":
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
case "service-subsets2": // TODO(rb): make this merge with 'service-subsets'
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "foo",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
case "default-service-subsets2": // TODO(rb): rename to strip the 2 when the prior is merged with 'service-subsets'
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "foo",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
case "ignore-extra-resolvers":
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "notfound",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
case "service-timeouts":
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
case "non-hash-lb-injected":
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: "service-resolvers", // serviceResolversWatchID
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
LoadBalancer: &structs.LoadBalancer{
|
||||
Policy: "least_request",
|
||||
LeastRequestConfig: &structs.LeastRequestConfig{
|
||||
ChoiceCount: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
case "hash-lb-ignored":
|
||||
extraUpdates = append(extraUpdates, cache.UpdateEvent{
|
||||
CorrelationID: "service-resolvers", // serviceResolversWatchID
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
LoadBalancer: &structs.LoadBalancer{
|
||||
Policy: "ring_hash",
|
||||
RingHashConfig: &structs.RingHashConfig{
|
||||
MinimumRingSize: 20,
|
||||
MaximumRingSize: 50,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
default:
|
||||
t.Fatalf("unknown variant: %s", variant)
|
||||
return nil
|
||||
}
|
||||
|
||||
baseEvents := []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: rootsWatchID,
|
||||
Result: roots,
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceListWatchID,
|
||||
Result: &structs.IndexedServiceList{
|
||||
Services: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: datacentersWatchID,
|
||||
Result: &[]string{"dc1"},
|
||||
},
|
||||
}
|
||||
|
||||
if populateServices || useFederationStates {
|
||||
baseEvents = testSpliceEvents(baseEvents, []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: datacentersWatchID,
|
||||
Result: &[]string{"dc1", "dc2", "dc4", "dc6"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if populateServices {
|
||||
var (
|
||||
foo = structs.NewServiceName("foo", nil)
|
||||
bar = structs.NewServiceName("bar", nil)
|
||||
)
|
||||
baseEvents = testSpliceEvents(baseEvents, []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: "mesh-gateway:dc2",
|
||||
Result: &structs.IndexedNodesWithGateways{
|
||||
Nodes: TestGatewayNodesDC2(t),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: "mesh-gateway:dc4",
|
||||
Result: &structs.IndexedNodesWithGateways{
|
||||
Nodes: TestGatewayNodesDC4Hostname(t),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: "mesh-gateway:dc6",
|
||||
Result: &structs.IndexedNodesWithGateways{
|
||||
Nodes: TestGatewayNodesDC6Hostname(t),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceListWatchID,
|
||||
Result: &structs.IndexedServiceList{
|
||||
Services: []structs.ServiceName{
|
||||
foo,
|
||||
bar,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: "connect-service:" + foo.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: TestGatewayServiceGroupFooDC1(t),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: "connect-service:" + bar.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: TestGatewayServiceGroupBarDC1(t),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolversWatchID,
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
//
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if useFederationStates {
|
||||
nsFn = testSpliceNodeServiceFunc(nsFn, func(ns *structs.NodeService) {
|
||||
ns.Meta[structs.MetaWANFederationKey] = "1"
|
||||
})
|
||||
|
||||
if deleteCrossDCEntry {
|
||||
baseEvents = testSpliceEvents(baseEvents, []cache.UpdateEvent{
|
||||
{
|
||||
// Have the cross-dc query mechanism not work for dc2 so
|
||||
// fedstates will infill.
|
||||
CorrelationID: "mesh-gateway:dc2",
|
||||
Result: &structs.IndexedNodesWithGateways{
|
||||
Nodes: nil,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
dc2Nodes := TestGatewayNodesDC2(t)
|
||||
switch variant {
|
||||
case "newer-info-in-federation-states":
|
||||
// Create a duplicate entry in FedStateGateways, with a high ModifyIndex, to
|
||||
// verify that fresh data in the federation state is preferred over stale data
|
||||
// in GatewayGroups.
|
||||
svc := structs.TestNodeServiceMeshGatewayWithAddrs(t,
|
||||
"10.0.1.3", 8443,
|
||||
structs.ServiceAddress{Address: "10.0.1.3", Port: 8443},
|
||||
structs.ServiceAddress{Address: "198.18.1.3", Port: 443},
|
||||
)
|
||||
svc.RaftIndex.ModifyIndex = math.MaxUint64
|
||||
|
||||
dc2Nodes = structs.CheckServiceNodes{
|
||||
{
|
||||
Node: dc2Nodes[0].Node,
|
||||
Service: svc,
|
||||
},
|
||||
}
|
||||
case "older-info-in-federation-states":
|
||||
// Create a duplicate entry in FedStateGateways, with a low ModifyIndex, to
|
||||
// verify that stale data in the federation state is ignored in favor of the
|
||||
// fresher data in GatewayGroups.
|
||||
svc := structs.TestNodeServiceMeshGatewayWithAddrs(t,
|
||||
"10.0.1.3", 8443,
|
||||
structs.ServiceAddress{Address: "10.0.1.3", Port: 8443},
|
||||
structs.ServiceAddress{Address: "198.18.1.3", Port: 443},
|
||||
)
|
||||
svc.RaftIndex.ModifyIndex = 0
|
||||
|
||||
dc2Nodes = structs.CheckServiceNodes{
|
||||
{
|
||||
Node: dc2Nodes[0].Node,
|
||||
Service: svc,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
baseEvents = testSpliceEvents(baseEvents, []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: federationStateListGatewaysWatchID,
|
||||
Result: &structs.DatacenterIndexedCheckServiceNodes{
|
||||
DatacenterNodes: map[string]structs.CheckServiceNodes{
|
||||
"dc2": dc2Nodes,
|
||||
"dc4": TestGatewayNodesDC4Hostname(t),
|
||||
"dc6": TestGatewayNodesDC6Hostname(t),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: consulServerListWatchID,
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: nil, // TODO
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return testConfigSnapshotFixture(t, &structs.NodeService{
|
||||
Kind: structs.ServiceKindMeshGateway,
|
||||
Service: "mesh-gateway",
|
||||
Address: "1.2.3.4",
|
||||
Port: 8443,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
Config: map[string]interface{}{},
|
||||
},
|
||||
Meta: make(map[string]string),
|
||||
TaggedAddresses: map[string]structs.ServiceAddress{
|
||||
structs.TaggedAddressLAN: {
|
||||
Address: "1.2.3.4",
|
||||
Port: 8443,
|
||||
},
|
||||
structs.TaggedAddressWAN: {
|
||||
Address: "198.18.0.1",
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
|
||||
}
|
|
@ -0,0 +1,655 @@
|
|||
package proxycfg
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/go-testing-interface"
|
||||
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
agentcache "github.com/hashicorp/consul/agent/cache"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func TestConfigSnapshotTerminatingGateway(
|
||||
t testing.T,
|
||||
populateServices bool,
|
||||
nsFn func(ns *structs.NodeService),
|
||||
extraUpdates []agentcache.UpdateEvent,
|
||||
) *ConfigSnapshot {
|
||||
roots, _ := TestCerts(t)
|
||||
|
||||
var (
|
||||
web = structs.NewServiceName("web", nil)
|
||||
api = structs.NewServiceName("api", nil)
|
||||
db = structs.NewServiceName("db", nil)
|
||||
cache = structs.NewServiceName("cache", nil)
|
||||
)
|
||||
|
||||
baseEvents := []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: rootsWatchID,
|
||||
Result: roots,
|
||||
},
|
||||
{
|
||||
CorrelationID: gatewayServicesWatchID,
|
||||
Result: &structs.IndexedGatewayServices{
|
||||
Services: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if populateServices {
|
||||
webNodes := TestUpstreamNodes(t, web.Name)
|
||||
webNodes[0].Service.Meta = map[string]string{"version": "1"}
|
||||
webNodes[1].Service.Meta = map[string]string{"version": "2"}
|
||||
|
||||
apiNodes := structs.CheckServiceNodes{
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "api",
|
||||
Node: "test1",
|
||||
Address: "10.10.1.1",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "api",
|
||||
Address: "api.mydomain",
|
||||
Port: 8081,
|
||||
},
|
||||
Checks: structs.HealthChecks{
|
||||
{Status: "critical"},
|
||||
},
|
||||
},
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "test2",
|
||||
Node: "test2",
|
||||
Address: "10.10.1.2",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "api",
|
||||
Address: "api.altdomain",
|
||||
Port: 8081,
|
||||
Meta: map[string]string{
|
||||
"domain": "alt",
|
||||
},
|
||||
},
|
||||
},
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "test3",
|
||||
Node: "test3",
|
||||
Address: "10.10.1.3",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "api",
|
||||
Address: "10.10.1.3",
|
||||
Port: 8081,
|
||||
},
|
||||
},
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "test4",
|
||||
Node: "test4",
|
||||
Address: "10.10.1.4",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "api",
|
||||
Address: "api.thirddomain",
|
||||
Port: 8081,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Has failing instance
|
||||
dbNodes := structs.CheckServiceNodes{
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "db",
|
||||
Node: "test4",
|
||||
Address: "10.10.1.4",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "db",
|
||||
Address: "db.mydomain",
|
||||
Port: 8081,
|
||||
},
|
||||
Checks: structs.HealthChecks{
|
||||
{Status: "critical"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Has passing instance but failing subset
|
||||
cacheNodes := structs.CheckServiceNodes{
|
||||
{
|
||||
Node: &structs.Node{
|
||||
ID: "cache",
|
||||
Node: "test5",
|
||||
Address: "10.10.1.5",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "cache",
|
||||
Address: "cache.mydomain",
|
||||
Port: 8081,
|
||||
},
|
||||
},
|
||||
{
|
||||
Node: &structs.Node{
|
||||
ID: "cache",
|
||||
Node: "test5",
|
||||
Address: "10.10.1.5",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Service: "cache",
|
||||
Address: "cache.mydomain",
|
||||
Port: 8081,
|
||||
Meta: map[string]string{
|
||||
"Env": "prod",
|
||||
},
|
||||
},
|
||||
Checks: structs.HealthChecks{
|
||||
{Status: "critical"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
baseEvents = testSpliceEvents(baseEvents, []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: gatewayServicesWatchID,
|
||||
Result: &structs.IndexedGatewayServices{
|
||||
Services: []*structs.GatewayService{
|
||||
{
|
||||
Service: web,
|
||||
CAFile: "ca.cert.pem",
|
||||
},
|
||||
{
|
||||
Service: api,
|
||||
CAFile: "ca.cert.pem",
|
||||
CertFile: "api.cert.pem",
|
||||
KeyFile: "api.key.pem",
|
||||
},
|
||||
{
|
||||
Service: db,
|
||||
},
|
||||
{
|
||||
Service: cache,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: externalServiceIDPrefix + web.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: webNodes,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: externalServiceIDPrefix + api.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: apiNodes,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: externalServiceIDPrefix + db.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: dbNodes,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: externalServiceIDPrefix + cache.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: cacheNodes,
|
||||
},
|
||||
},
|
||||
// ========
|
||||
// no intentions defined for these services
|
||||
{
|
||||
CorrelationID: serviceIntentionsIDPrefix + web.String(),
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceIntentionsIDPrefix + api.String(),
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceIntentionsIDPrefix + db.String(),
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceIntentionsIDPrefix + cache.String(),
|
||||
Result: &structs.IndexedIntentionMatches{
|
||||
Matches: []structs.Intentions{
|
||||
nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
// ========
|
||||
{
|
||||
CorrelationID: serviceLeafIDPrefix + web.String(),
|
||||
Result: &structs.IssuedCert{
|
||||
CertPEM: golden(t, "test-leaf-cert"),
|
||||
PrivateKeyPEM: golden(t, "test-leaf-key"),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceLeafIDPrefix + api.String(),
|
||||
Result: &structs.IssuedCert{
|
||||
CertPEM: golden(t, "alt-test-leaf-cert"),
|
||||
PrivateKeyPEM: golden(t, "alt-test-leaf-key"),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceLeafIDPrefix + db.String(),
|
||||
Result: &structs.IssuedCert{
|
||||
CertPEM: golden(t, "db-test-leaf-cert"),
|
||||
PrivateKeyPEM: golden(t, "db-test-leaf-key"),
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceLeafIDPrefix + cache.String(),
|
||||
Result: &structs.IssuedCert{
|
||||
CertPEM: golden(t, "cache-test-leaf-cert"),
|
||||
PrivateKeyPEM: golden(t, "cache-test-leaf-key"),
|
||||
},
|
||||
},
|
||||
// ========
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "tcp"},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + api.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "tcp"},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + db.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "tcp"},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + cache.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "tcp"},
|
||||
},
|
||||
},
|
||||
// ========
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + web.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + api.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + db.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + cache.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: nil,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return testConfigSnapshotFixture(t, &structs.NodeService{
|
||||
Kind: structs.ServiceKindTerminatingGateway,
|
||||
Service: "terminating-gateway",
|
||||
Address: "1.2.3.4",
|
||||
Port: 8443,
|
||||
TaggedAddresses: map[string]structs.ServiceAddress{
|
||||
structs.TaggedAddressWAN: {
|
||||
Address: "198.18.0.1",
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
|
||||
}
|
||||
|
||||
// TestConfigSnapshotTerminatingGatewayServiceSubsets returns a terminating
// gateway ConfigSnapshot with a service-resolver defining subsets for the
// "web" service only (alsoAdjustCache = false; the "cache" service gets no
// resolver here).
func TestConfigSnapshotTerminatingGatewayServiceSubsets(t testing.T) *ConfigSnapshot {
	return testConfigSnapshotTerminatingGatewayServiceSubsets(t, false)
}
|
||||
// TestConfigSnapshotTerminatingGatewayServiceSubsetsWebAndCache returns a
// terminating gateway ConfigSnapshot with service-resolver subsets defined
// for both the "web" and "cache" services (alsoAdjustCache = true).
func TestConfigSnapshotTerminatingGatewayServiceSubsetsWebAndCache(t testing.T) *ConfigSnapshot {
	return testConfigSnapshotTerminatingGatewayServiceSubsets(t, true)
}
|
||||
func testConfigSnapshotTerminatingGatewayServiceSubsets(t testing.T, alsoAdjustCache bool) *ConfigSnapshot {
|
||||
var (
|
||||
web = structs.NewServiceName("web", nil)
|
||||
cache = structs.NewServiceName("cache", nil)
|
||||
)
|
||||
|
||||
events := []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + web.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if alsoAdjustCache {
|
||||
events = testSpliceEvents(events, []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + cache.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "cache",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"prod": {
|
||||
Filter: "Service.Meta.Env == prod",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return TestConfigSnapshotTerminatingGateway(t, true, nil, events)
|
||||
}
|
||||
|
||||
// TestConfigSnapshotTerminatingGatewayDefaultServiceSubset returns a
// terminating gateway ConfigSnapshot where the "web" service-resolver
// declares "v2" as its DefaultSubset (v1 and v2 subsets filtered on
// Service.Meta.version; v2 only-passing).
func TestConfigSnapshotTerminatingGatewayDefaultServiceSubset(t testing.T) *ConfigSnapshot {
	web := structs.NewServiceName("web", nil)

	return TestConfigSnapshotTerminatingGateway(t, true, nil, []agentcache.UpdateEvent{
		{
			CorrelationID: serviceResolverIDPrefix + web.String(),
			Result: &structs.IndexedConfigEntries{
				Kind: structs.ServiceResolver,
				Entries: []structs.ConfigEntry{
					&structs.ServiceResolverConfigEntry{
						Kind:          structs.ServiceResolver,
						Name:          "web",
						DefaultSubset: "v2",
						Subsets: map[string]structs.ServiceResolverSubset{
							"v1": {
								Filter: "Service.Meta.version == 1",
							},
							"v2": {
								Filter:      "Service.Meta.version == 2",
								OnlyPassing: true,
							},
						},
					},
				},
			},
		},
		// Deliberately disabled service-config event kept for reference;
		// this fixture exercises the resolver event alone.
		// {
		// 	CorrelationID: serviceConfigIDPrefix + web.String(),
		// 	Result: &structs.ServiceConfigResponse{
		// 		ProxyConfig: map[string]interface{}{"protocol": "http"},
		// 	},
		// },
	})
}
|
||||
|
||||
// TestConfigSnapshotTerminatingGatewayLBConfig returns a terminating gateway
// ConfigSnapshot using the "default" load-balancer variant (ring_hash policy
// with hash policies intact; see testConfigSnapshotTerminatingGatewayLBConfig).
func TestConfigSnapshotTerminatingGatewayLBConfig(t testing.T) *ConfigSnapshot {
	return testConfigSnapshotTerminatingGatewayLBConfig(t, "default")
}
|
||||
// TestConfigSnapshotTerminatingGatewayLBConfigNoHashPolicies returns a
// terminating gateway ConfigSnapshot using the "no-hash-policies" variant:
// the ring_hash load-balancer config is present but HashPolicies is nil.
func TestConfigSnapshotTerminatingGatewayLBConfigNoHashPolicies(t testing.T) *ConfigSnapshot {
	return testConfigSnapshotTerminatingGatewayLBConfig(t, "no-hash-policies")
}
|
||||
func testConfigSnapshotTerminatingGatewayLBConfig(t testing.T, variant string) *ConfigSnapshot {
|
||||
web := structs.NewServiceName("web", nil)
|
||||
|
||||
entry := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
LoadBalancer: &structs.LoadBalancer{
|
||||
Policy: "ring_hash",
|
||||
RingHashConfig: &structs.RingHashConfig{
|
||||
MinimumRingSize: 20,
|
||||
MaximumRingSize: 50,
|
||||
},
|
||||
HashPolicies: []structs.HashPolicy{
|
||||
{
|
||||
Field: structs.HashPolicyCookie,
|
||||
FieldValue: "chocolate-chip",
|
||||
Terminal: true,
|
||||
},
|
||||
{
|
||||
Field: structs.HashPolicyHeader,
|
||||
FieldValue: "x-user-id",
|
||||
},
|
||||
{
|
||||
SourceIP: true,
|
||||
Terminal: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
switch variant {
|
||||
case "default":
|
||||
case "no-hash-policies":
|
||||
entry.LoadBalancer.HashPolicies = nil
|
||||
default:
|
||||
t.Fatalf("unknown variant %q", variant)
|
||||
return nil
|
||||
}
|
||||
|
||||
return TestConfigSnapshotTerminatingGateway(t, true, nil, []cache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + web.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{entry},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigSnapshotTerminatingGatewayHostnameSubsets(t testing.T) *ConfigSnapshot {
|
||||
var (
|
||||
api = structs.NewServiceName("api", nil)
|
||||
cache = structs.NewServiceName("cache", nil)
|
||||
)
|
||||
|
||||
return TestConfigSnapshotTerminatingGateway(t, true, nil, []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + api.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "api",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"alt": {
|
||||
Filter: "Service.Meta.domain == alt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + cache.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "cache",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"prod": {
|
||||
Filter: "Service.Meta.Env == prod",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + api.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + cache.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigSnapshotTerminatingGatewayIgnoreExtraResolvers(t testing.T) *ConfigSnapshot {
|
||||
var (
|
||||
web = structs.NewServiceName("web", nil)
|
||||
notfound = structs.NewServiceName("notfound", nil)
|
||||
)
|
||||
|
||||
return TestConfigSnapshotTerminatingGateway(t, true, nil, []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + web.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceResolverIDPrefix + notfound.String(),
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "notfound",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{"protocol": "http"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigSnapshotTerminatingGatewayWithServiceDefaultsMeta(t testing.T) *ConfigSnapshot {
|
||||
web := structs.NewServiceName("web", nil)
|
||||
return TestConfigSnapshotTerminatingGateway(t, true, nil, []agentcache.UpdateEvent{
|
||||
{
|
||||
CorrelationID: serviceConfigIDPrefix + web.String(),
|
||||
Result: &structs.ServiceConfigResponse{
|
||||
Meta: map[string]string{"a": "b"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue