Merge branch 'main' into nia/docs-0.7.0
This commit is contained in: commit 09ff452fa6
@ -0,0 +1,3 @@
```release-note:improvement
connect: expose new tracing configuration on envoy
```

@ -0,0 +1,3 @@
```release-note:improvement
envoy: adds additional Envoy outlier ejection parameters to passive health check configurations.
```

@ -0,0 +1,3 @@
```release-note:feature
connect: Server address changes are streamed to peers
```

@ -0,0 +1,3 @@
```release-note:bugfix
envoy: validate name before deleting proxy default configurations.
```

@ -0,0 +1,4 @@
```release-note:feature
ui: Use withCredentials for all HTTP API requests
```

@ -0,0 +1,3 @@
```release-note:bugfix
peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state.
```

@ -0,0 +1,3 @@
```release-note:improvement
xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
```
@ -0,0 +1,5 @@
```release-note:bug
api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
`QueryFailoverOptions` and marks it as deprecated.
```
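Restoring the old name as a deprecated alias is what keeps existing callers compiling; a minimal sketch of the pattern (the field names shown here are illustrative, not the api package's full definition):

```go
package api

// QueryFailoverOptions is the renamed type (fields illustrative).
type QueryFailoverOptions struct {
    NearestN    int
    Datacenters []string
}

// QueryDatacenterOptions keeps code written against the old name compiling.
//
// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions
```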
@ -0,0 +1,3 @@
```release-note:feature
peering: Add support to failover to services running on cluster peers.
```

@ -0,0 +1,3 @@
```release-note:feature
cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information.
```

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed an issue where intermediate certificates could build up in the root CA because they were never being pruned after expiring.
```

@ -0,0 +1,3 @@
```release-note:bug
checks: If set, use proxy address for automatically added sidecar check instead of service address.
```

@ -0,0 +1,3 @@
```release-note:feature
http: Add new `get-or-empty` operation to the txn api. Refer to the [API docs](https://www.consul.io/api-docs/txn#kv-operations) for more information.
```

@ -0,0 +1,3 @@
```release-note:feature
ui: Detect a TokenSecretID cookie and pass it through to localStorage
```

@ -0,0 +1,3 @@
```release-note:improvement
ui: Reuse connections for requests to /v1/internal/ui/metrics-proxy/
```

@ -0,0 +1,3 @@
```release-note:improvement
snapshot agent: **(Enterprise only)** Add support for path-based addressing when using s3 backend.
```
@ -816,7 +816,7 @@ jobs:
      # Get go binary from workspace
      - attach_workspace:
          at: .
-     # Build the consul-dev image from the already built binary
+     # Build the consul:local image from the already built binary
      - run:
          command: |
            sudo rm -rf /usr/local/go
@ -887,8 +887,8 @@ jobs:
      - attach_workspace:
          at: .
      - run: *install-gotestsum
-     # Build the consul-dev image from the already built binary
-     - run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
+     # Build the consul:local image from the already built binary
+     - run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
      - run:
          name: Envoy Integration Tests
          command: |
@ -902,6 +902,7 @@ jobs:
            GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
            GOTESTSUM_FORMAT: standard-verbose
            COMPOSE_INTERACTIVE_NO_CLI: 1
+           LAMBDA_TESTS_ENABLED: "true"
            # tput complains if this isn't set to something.
            TERM: ansi
      - store_artifacts:
@ -16,7 +16,7 @@ jobs:
  backport:
    if: github.event.pull_request.merged
    runs-on: ubuntu-latest
-   container: hashicorpdev/backport-assistant:0.2.3
+   container: hashicorpdev/backport-assistant:0.2.5
    steps:
      - name: Run Backport Assistant for stable-website
        run: |
@ -24,6 +24,7 @@ jobs:
        env:
          BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
          BACKPORT_TARGET_TEMPLATE: "stable-website"
+         BACKPORT_MERGE_COMMIT: true
          GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
      - name: Backport changes to latest release branch
        run: |
@ -8,6 +8,8 @@ linters:
    - ineffassign
    - unparam
    - forbidigo
+   - gomodguard
+   - depguard

issues:
  # Disable the default exclude list so that all excludes are explicitly
@ -75,6 +77,30 @@ linters-settings:
    # Exclude godoc examples from forbidigo checks.
    # Default: true
    exclude_godoc_examples: false
+  gomodguard:
+    blocked:
+      # List of blocked modules.
+      modules:
+        # Blocked module.
+        - github.com/hashicorp/net-rpc-msgpackrpc:
+            recommendations:
+              - github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc
+        - github.com/hashicorp/go-msgpack:
+            recommendations:
+              - github.com/hashicorp/consul-net-rpc/go-msgpack
+
+  depguard:
+    list-type: denylist
+    include-go-root: true
+    # A list of packages for the list type specified.
+    # Default: []
+    packages:
+      - net/rpc
+    # A list of packages for the list type specified.
+    # Specify an error message to output when a denied package is used.
+    # Default: []
+    packages-with-error-message:
+      - net/rpc: 'only use forked copy in github.com/hashicorp/consul-net-rpc/net/rpc'

run:
  timeout: 10m
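In practice the new gomodguard/depguard rules mean code must import the forked RPC and msgpack packages rather than the blocked upstream modules or the standard library net/rpc; a minimal sketch of a compliant import block (blank imports used only for illustration):

```go
// Package example only demonstrates which import paths the lint rules allow;
// the exact subpackages a real file needs will differ.
package example

import (
    _ "github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
    _ "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
    _ "github.com/hashicorp/consul-net-rpc/net/rpc"
)
```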
GNUmakefile
@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'

GOTAGS ?=
GOPATH=$(shell go env GOPATH)
+GOARCH?=$(shell go env GOARCH)
MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)

export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@ -129,7 +130,7 @@ export GOLDFLAGS

# Allow skipping docker build during integration tests in CI since we already
# have a built binary
-ENVOY_INTEG_DEPS?=dev-docker
+ENVOY_INTEG_DEPS?=docker-envoy-integ
ifdef SKIP_DOCKER_BUILD
ENVOY_INTEG_DEPS=noop
endif
@ -152,7 +153,28 @@ dev-docker: linux
	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
	@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
	# 'consul:local' tag is needed to run the integration tests
-	@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+	@docker buildx use default && docker buildx build -t 'consul:local' \
+		--platform linux/$(GOARCH) \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--load \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
+
+check-remote-dev-image-env:
+ifndef REMOTE_DEV_IMAGE
+	$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
+endif
+
+remote-docker: check-remote-dev-image-env
+	$(MAKE) GOARCH=amd64 linux
+	$(MAKE) GOARCH=arm64 linux
+	@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
+	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
+	@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
+	@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
+		--platform linux/amd64,linux/arm64 \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--push \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/

# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
	@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
endif

-# linux builds a linux binary independent of the source platform
+# linux builds a linux binary compatible with the source platform
linux:
-	@mkdir -p ./pkg/bin/linux_amd64
-	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
+	@mkdir -p ./pkg/bin/linux_$(GOARCH)
+	CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"

# dist builds binaries for all platforms and packages them for distribution
dist:
@ -324,8 +346,22 @@ consul-docker: go-build-image
ui-docker: ui-build-image
	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui

+# Build image used to run integration tests locally.
+docker-envoy-integ:
+	$(MAKE) GOARCH=amd64 linux
+	docker build \
+		--platform linux/amd64 $(NOCACHE) $(QUIET) \
+		-t 'consul:local' \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		$(CURDIR)/pkg/bin/linux_amd64 \
+		-f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+
+# Run integration tests.
+# Use GO_TEST_FLAGS to run specific tests:
+#   make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"
+# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment.
test-envoy-integ: $(ENVOY_INTEG_DEPS)
-	@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
+	@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy

.PHONY: test-compat-integ
test-compat-integ: dev-docker
@ -941,6 +941,7 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
	srv := &HTTPHandlers{
		agent:    a,
		denylist: NewDenylist(a.config.HTTPBlockEndpoints),
+		proxyTransport: http.DefaultTransport,
	}
	a.configReloaders = append(a.configReloaders, srv.ReloadConfig)
	a.httpHandlers = srv
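Handing the handlers a single shared transport (http.DefaultTransport here) is what lets the metrics-proxy endpoint reuse keep-alive connections instead of dialing the upstream for every proxied request; a small standalone sketch of that pattern, with illustrative names:

```go
package main

import "net/http"

// newProxyClient returns a client whose requests share one RoundTripper, so
// keep-alive connections to the upstream are pooled and reused. Building a
// fresh &http.Transport{} per request would defeat that pooling.
func newProxyClient(shared http.RoundTripper) *http.Client {
    return &http.Client{Transport: shared}
}

func main() {
    // All requests made through this client reuse the same connection pool.
    client := newProxyClient(http.DefaultTransport)
    _ = client
}
```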
@ -2104,6 +2105,21 @@ func (a *Agent) AddService(req AddServiceRequest) error {
// addServiceLocked adds a service entry to the service manager if enabled, or directly
// to the local state if it is not. This function assumes the state lock is already held.
func (a *Agent) addServiceLocked(req addServiceLockedRequest) error {
+	// Must auto-assign the port and default checks (if needed) here to avoid race collisions.
+	if req.Service.LocallyRegisteredAsSidecar {
+		if req.Service.Port < 1 {
+			port, err := a.sidecarPortFromServiceIDLocked(req.Service.CompoundServiceID())
+			if err != nil {
+				return err
+			}
+			req.Service.Port = port
+		}
+		// Setup default check if none given.
+		if len(req.chkTypes) < 1 {
+			req.chkTypes = sidecarDefaultChecks(req.Service.ID, req.Service.Address, req.Service.Proxy.LocalServiceAddress, req.Service.Port)
+		}
+	}
+
	req.Service.EnterpriseMeta.Normalize()

	if err := a.validateService(req.Service, req.chkTypes); err != nil {
@ -3368,7 +3384,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
		}

		// Grab and validate sidecar if there is one too
-		sidecar, sidecarChecks, sidecarToken, err := a.sidecarServiceFromNodeService(ns, service.Token)
+		sidecar, sidecarChecks, sidecarToken, err := sidecarServiceFromNodeService(ns, service.Token)
		if err != nil {
			return fmt.Errorf("Failed to validate sidecar for service %q: %v", service.Name, err)
		}
@ -4268,7 +4284,10 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources {
	sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth))
	sources.Intentions = proxycfgglue.ServerIntentions(deps)
	sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps)
+	sources.IntentionUpstreamsDestination = proxycfgglue.ServerIntentionUpstreamsDestination(deps)
+	sources.InternalServiceDump = proxycfgglue.ServerInternalServiceDump(deps, proxycfgglue.CacheInternalServiceDump(a.cache))
	sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps)
+	sources.ResolvedServiceConfig = proxycfgglue.ServerResolvedServiceConfig(deps, proxycfgglue.CacheResolvedServiceConfig(a.cache))
	sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache))
	sources.TrustBundle = proxycfgglue.ServerTrustBundle(deps)
	sources.TrustBundleList = proxycfgglue.ServerTrustBundleList(deps)
@ -1159,7 +1159,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
	}

	// See if we have a sidecar to register too
-	sidecar, sidecarChecks, sidecarToken, err := s.agent.sidecarServiceFromNodeService(ns, token)
+	sidecar, sidecarChecks, sidecarToken, err := sidecarServiceFromNodeService(ns, token)
	if err != nil {
		return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid SidecarService: %s", err)}
	}
@ -3764,7 +3764,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
				fmt.Println("TCP Check:= ", v)
			}
			if hasNoCorrectTCPCheck {
-				t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
+				t.Fatalf("Did not find the expected TCP Healthcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
			}
			require.Equal(t, sidecarSvc, gotSidecar)
		})
@ -2786,7 +2786,7 @@ func TestAgent_DeregisterPersistedSidecarAfterRestart(t *testing.T) {
		},
	}

-	connectSrv, _, _, err := a.sidecarServiceFromNodeService(srv, "")
+	connectSrv, _, _, err := sidecarServiceFromNodeService(srv, "")
	require.NoError(t, err)

	// First persist the check
@ -2959,11 +2959,24 @@ func testAgent_loadServices_sidecar(t *testing.T, extraHCL string) {
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
-	requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
+	sidecarSvc := requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

+	// Verify default checks have been added
+	wantChecks := sidecarDefaultChecks(sidecarSvc.ID, sidecarSvc.Address, sidecarSvc.Proxy.LocalServiceAddress, sidecarSvc.Port)
+	gotChecks := a.State.ChecksForService(sidecarSvc.CompoundServiceID(), true)
+	gotChkNames := make(map[string]types.CheckID)
+	for _, check := range gotChecks {
+		requireCheckExists(t, a, check.CheckID)
+		gotChkNames[check.Name] = check.CheckID
+	}
+	for _, check := range wantChecks {
+		chkName := check.Name
+		require.NotNil(t, gotChkNames[chkName])
+	}
+
	// Sanity check rabbitmq service should NOT have sidecar info in state since
	// it's done it's job and should be a registration syntax sugar only.
	assert.Nil(t, svc.Connect.SidecarService)
@ -0,0 +1,229 @@
package configentry

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
	"github.com/mitchellh/copystructure"

	"github.com/hashicorp/consul/agent/structs"
)

func ComputeResolvedServiceConfig(
	args *structs.ServiceConfigRequest,
	upstreamIDs []structs.ServiceID,
	legacyUpstreams bool,
	entries *ResolvedServiceConfigSet,
	logger hclog.Logger,
) (*structs.ServiceConfigResponse, error) {
	var thisReply structs.ServiceConfigResponse

	thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault

	// TODO(freddy) Refactor this into smaller set of state store functions
	// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
	// blocking query, this function will be rerun and these state store lookups will both be current.
	// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
	var proxyConfGlobalProtocol string
	proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
	if proxyConf != nil {
		// Apply the proxy defaults to the sidecar's proxy config
		mapCopy, err := copystructure.Copy(proxyConf.Config)
		if err != nil {
			return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
		}
		thisReply.ProxyConfig = mapCopy.(map[string]interface{})
		thisReply.Mode = proxyConf.Mode
		thisReply.TransparentProxy = proxyConf.TransparentProxy
		thisReply.MeshGateway = proxyConf.MeshGateway
		thisReply.Expose = proxyConf.Expose

		// Extract the global protocol from proxyConf for upstream configs.
		rawProtocol := proxyConf.Config["protocol"]
		if rawProtocol != nil {
			var ok bool
			proxyConfGlobalProtocol, ok = rawProtocol.(string)
			if !ok {
				return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
			}
		}
	}

	serviceConf := entries.GetServiceDefaults(
		structs.NewServiceID(args.Name, &args.EnterpriseMeta),
	)
	if serviceConf != nil {
		if serviceConf.Expose.Checks {
			thisReply.Expose.Checks = true
		}
		if len(serviceConf.Expose.Paths) >= 1 {
			thisReply.Expose.Paths = serviceConf.Expose.Paths
		}
		if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
			thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
		}
		if serviceConf.Protocol != "" {
			if thisReply.ProxyConfig == nil {
				thisReply.ProxyConfig = make(map[string]interface{})
			}
			thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
		}
		if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
			thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
		}
		if serviceConf.TransparentProxy.DialedDirectly {
			thisReply.TransparentProxy.DialedDirectly = serviceConf.TransparentProxy.DialedDirectly
		}
		if serviceConf.Mode != structs.ProxyModeDefault {
			thisReply.Mode = serviceConf.Mode
		}
		if serviceConf.Destination != nil {
			thisReply.Destination = *serviceConf.Destination
		}

		if serviceConf.MaxInboundConnections > 0 {
			if thisReply.ProxyConfig == nil {
				thisReply.ProxyConfig = map[string]interface{}{}
			}
			thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
		}

		thisReply.Meta = serviceConf.Meta
	}

	// First collect all upstreams into a set of seen upstreams.
	// Upstreams can come from:
	// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
	// - Implicitly from centralized upstream config in service-defaults
	seenUpstreams := map[structs.ServiceID]struct{}{}

	var (
		noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0

		// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
		// will never be transparent because the service config request does not use the resolved value.
		tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
	)

	// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
	// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
	// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
	if noUpstreamArgs && !tproxy {
		return &thisReply, nil
	}

	// First store all upstreams that were provided in the request
	for _, sid := range upstreamIDs {
		if _, ok := seenUpstreams[sid]; !ok {
			seenUpstreams[sid] = struct{}{}
		}
	}

	// Then store upstreams inferred from service-defaults and mapify the overrides.
	var (
		upstreamConfigs  = make(map[structs.ServiceID]*structs.UpstreamConfig)
		upstreamDefaults *structs.UpstreamConfig
		// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
		usConfigs = make(map[structs.ServiceID]map[string]interface{})
	)
	if serviceConf != nil && serviceConf.UpstreamConfig != nil {
		for i, override := range serviceConf.UpstreamConfig.Overrides {
			if override.Name == "" {
				logger.Warn(
					"Skipping UpstreamConfig.Overrides entry without a required name field",
					"entryIndex", i,
					"kind", serviceConf.GetKind(),
					"name", serviceConf.GetName(),
					"namespace", serviceConf.GetEnterpriseMeta().NamespaceOrEmpty(),
				)
				continue // skip this impossible condition
			}
			seenUpstreams[override.ServiceID()] = struct{}{}
			upstreamConfigs[override.ServiceID()] = override
		}
		if serviceConf.UpstreamConfig.Defaults != nil {
			upstreamDefaults = serviceConf.UpstreamConfig.Defaults

			// Store the upstream defaults under a wildcard key so that they can be applied to
			// upstreams that are inferred from intentions and do not have explicit upstream configuration.
			cfgMap := make(map[string]interface{})
			upstreamDefaults.MergeInto(cfgMap)

			wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WithWildcardNamespace())
			usConfigs[wildcard] = cfgMap
		}
	}

	for upstream := range seenUpstreams {
		resolvedCfg := make(map[string]interface{})

		// The protocol of an upstream is resolved in this order:
		// 1. Default protocol from proxy-defaults (how all services should be addressed)
		// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
		// 3. Protocol defined for the upstream in the service-defaults.(upstream_config.defaults|upstream_config.overrides) of the downstream
		//    (how the downstream wants to address it)
		protocol := proxyConfGlobalProtocol

		upstreamSvcDefaults := entries.GetServiceDefaults(
			structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
		)
		if upstreamSvcDefaults != nil {
			if upstreamSvcDefaults.Protocol != "" {
				protocol = upstreamSvcDefaults.Protocol
			}
		}

		if protocol != "" {
			resolvedCfg["protocol"] = protocol
		}

		// Merge centralized defaults for all upstreams before configuration for specific upstreams
		if upstreamDefaults != nil {
			upstreamDefaults.MergeInto(resolvedCfg)
		}

		// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
		// because it is specific to the proxy instance.
		//
		// The goal is to flatten the mesh gateway mode in this order:
		// 0. Value from centralized upstream_defaults
		// 1. Value from local proxy registration
		// 2. Value from centralized upstream_config
		// 3. Value from local upstream definition. This last step is done in the client's service manager.
		if !args.MeshGateway.IsZero() {
			resolvedCfg["mesh_gateway"] = args.MeshGateway
		}

		if upstreamConfigs[upstream] != nil {
			upstreamConfigs[upstream].MergeInto(resolvedCfg)
		}

		if len(resolvedCfg) > 0 {
			usConfigs[upstream] = resolvedCfg
		}
	}

	// don't allocate the slices just to not fill them
	if len(usConfigs) == 0 {
		return &thisReply, nil
	}

	if legacyUpstreams {
		// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
		thisReply.UpstreamConfigs = make(map[string]map[string]interface{})

		for us, conf := range usConfigs {
			thisReply.UpstreamConfigs[us.ID] = conf
		}

	} else {
		thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))

		for us, conf := range usConfigs {
			thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
				structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
		}
	}

	return &thisReply, nil
}
@ -0,0 +1,56 @@
package configentry

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/structs"
)

func Test_ComputeResolvedServiceConfig(t *testing.T) {
	type args struct {
		scReq       *structs.ServiceConfigRequest
		upstreamIDs []structs.ServiceID
		entries     *ResolvedServiceConfigSet
	}

	sid := structs.ServiceID{
		ID:             "sid",
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	tests := []struct {
		name string
		args args
		want *structs.ServiceConfigResponse
	}{
		{
			name: "proxy with maxinboundsconnections",
			args: args{
				scReq: &structs.ServiceConfigRequest{
					Name: "sid",
				},
				entries: &ResolvedServiceConfigSet{
					ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
						sid: {
							MaxInboundConnections: 20,
						},
					},
				},
			},
			want: &structs.ServiceConfigResponse{
				ProxyConfig: map[string]interface{}{
					"max_inbound_connections": 20,
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ComputeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
				false, tt.args.entries, nil)
			require.NoError(t, err)
			require.Equal(t, tt.want, got)
		})
	}
}
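A similar, hypothetical test could exercise the protocol branch of ComputeResolvedServiceConfig: a Protocol set in service-defaults should surface as ProxyConfig["protocol"] in the response. Sketch, reusing only the shapes shown in the test above and assuming the same package and imports:

```go
func Test_ComputeResolvedServiceConfig_Protocol(t *testing.T) {
	sid := structs.ServiceID{ID: "sid", EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()}
	entries := &ResolvedServiceConfigSet{
		ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
			sid: {Protocol: "http"},
		},
	}

	got, err := ComputeResolvedServiceConfig(&structs.ServiceConfigRequest{Name: "sid"}, nil, false, entries, nil)
	require.NoError(t, err)
	// The service-defaults protocol is copied into the opaque proxy config.
	require.Equal(t, "http", got.ProxyConfig["protocol"])
}
```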
@ -24,6 +24,8 @@ var (
		`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
	spiffeIDAgentRegexp = regexp.MustCompile(
		`^(?:/ap/([^/]+))?/agent/client/dc/([^/]+)/id/([^/]+)$`)
+	spiffeIDServerRegexp = regexp.MustCompile(
+		`^/agent/server/dc/([^/]+)$`)
	spiffeIDMeshGatewayRegexp = regexp.MustCompile(
		`^(?:/ap/([^/]+))?/gateway/mesh/dc/([^/]+)$`)
)

@ -144,6 +146,19 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
			Partition:  ap,
			Datacenter: dc,
		}, nil
+	} else if v := spiffeIDServerRegexp.FindStringSubmatch(path); v != nil {
+		dc := v[1]
+		if input.RawPath != "" {
+			var err error
+			if dc, err = url.PathUnescape(v[1]); err != nil {
+				return nil, fmt.Errorf("Invalid datacenter: %s", err)
+			}
+		}
+
+		return &SpiffeIDServer{
+			Host:       input.Host,
+			Datacenter: dc,
+		}, nil
	}

	// Test for signing ID
@ -0,0 +1,20 @@
package connect

import (
	"fmt"
	"net/url"
)

type SpiffeIDServer struct {
	Host       string
	Datacenter string
}

// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDServer) URI() *url.URL {
	var result url.URL
	result.Scheme = "spiffe"
	result.Host = id.Host
	result.Path = fmt.Sprintf("/agent/server/dc/%s", id.Datacenter)
	return &result
}
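Round-tripping a server ID through ParseCertURI shows how the new regexp and the SpiffeIDServer type fit together; a minimal sketch (the trust-domain host is a placeholder):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/hashicorp/consul/agent/connect"
)

func main() {
	// Placeholder trust-domain host; a real one is "<cluster UUID>.consul".
	u, _ := url.Parse("spiffe://1234.consul/agent/server/dc/dc1")

	id, err := connect.ParseCertURI(u)
	if err != nil {
		panic(err)
	}
	// id's dynamic type is *connect.SpiffeIDServer; URI() rebuilds the same ID.
	fmt.Println(id.URI()) // spiffe://1234.consul/agent/server/dc/dc1
}
```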
@ -54,6 +54,12 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool {
		// worry about Unicode domains if we start allowing customisation beyond the
		// built-in cluster ids.
		return strings.ToLower(other.Host) == id.Host()
+	case *SpiffeIDServer:
+		// The host component of the service must be an exact match for now under
+		// ascii case folding (since hostnames are case-insensitive). Later we might
+		// worry about Unicode domains if we start allowing customisation beyond the
+		// built-in cluster ids.
+		return strings.ToLower(other.Host) == id.Host()
	default:
		return false
	}
@ -78,7 +78,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
			want:  true,
		},
		{
-			name:  "service - good midex case",
+			name:  "service - good mixed case",
			id:    testSigning,
			input: &SpiffeIDService{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", Datacenter: "dc1", Service: "WEB"},
			want:  true,

@ -102,7 +102,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
			want:  true,
		},
		{
-			name:  "mesh gateway - good midex case",
+			name:  "mesh gateway - good mixed case",
			id:    testSigning,
			input: &SpiffeIDMeshGateway{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
			want:  true,

@ -119,6 +119,30 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
			input: &SpiffeIDMeshGateway{Host: TestClusterID + ".fake", Datacenter: "dc1"},
			want:  false,
		},
+		{
+			name:  "server - good",
+			id:    testSigning,
+			input: &SpiffeIDServer{Host: TestClusterID + ".consul", Datacenter: "dc1"},
+			want:  true,
+		},
+		{
+			name:  "server - good mixed case",
+			id:    testSigning,
+			input: &SpiffeIDServer{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
+			want:  true,
+		},
+		{
+			name:  "server - different cluster",
+			id:    testSigning,
+			input: &SpiffeIDServer{Host: "55555555-4444-3333-2222-111111111111.consul", Datacenter: "dc1"},
+			want:  false,
+		},
+		{
+			name:  "server - different TLD",
+			id:    testSigning,
+			input: &SpiffeIDServer{Host: TestClusterID + ".fake", Datacenter: "dc1"},
+			want:  false,
+		},
	}

	for _, tt := range tests {
@ -19,109 +19,118 @@ func TestParseCertURIFromString(t *testing.T) {
		ParseError string
	}{
		{
-			"invalid scheme",
-			"http://google.com/",
-			nil,
-			"scheme",
+			Name:       "invalid scheme",
+			URI:        "http://google.com/",
+			Struct:     nil,
+			ParseError: "scheme",
		},
		{
-			"basic service ID",
-			"spiffe://1234.consul/ns/default/dc/dc01/svc/web",
-			&SpiffeIDService{
+			Name: "basic service ID",
+			URI:  "spiffe://1234.consul/ns/default/dc/dc01/svc/web",
+			Struct: &SpiffeIDService{
				Host:       "1234.consul",
				Partition:  defaultEntMeta.PartitionOrDefault(),
				Namespace:  "default",
				Datacenter: "dc01",
				Service:    "web",
			},
-			"",
+			ParseError: "",
		},
		{
-			"basic service ID with partition",
-			"spiffe://1234.consul/ap/bizdev/ns/default/dc/dc01/svc/web",
-			&SpiffeIDService{
+			Name: "basic service ID with partition",
+			URI:  "spiffe://1234.consul/ap/bizdev/ns/default/dc/dc01/svc/web",
+			Struct: &SpiffeIDService{
				Host:       "1234.consul",
				Partition:  "bizdev",
				Namespace:  "default",
				Datacenter: "dc01",
				Service:    "web",
			},
-			"",
+			ParseError: "",
		},
		{
-			"basic agent ID",
-			"spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
-			&SpiffeIDAgent{
+			Name: "basic agent ID",
+			URI:  "spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
+			Struct: &SpiffeIDAgent{
				Host:       "1234.consul",
				Partition:  defaultEntMeta.PartitionOrDefault(),
				Datacenter: "dc1",
				Agent:      "uuid",
			},
-			"",
+			ParseError: "",
		},
		{
-			"basic agent ID with partition",
-			"spiffe://1234.consul/ap/bizdev/agent/client/dc/dc1/id/uuid",
-			&SpiffeIDAgent{
+			Name: "basic agent ID with partition",
+			URI:  "spiffe://1234.consul/ap/bizdev/agent/client/dc/dc1/id/uuid",
+			Struct: &SpiffeIDAgent{
				Host:       "1234.consul",
				Partition:  "bizdev",
				Datacenter: "dc1",
				Agent:      "uuid",
			},
-			"",
+			ParseError: "",
		},
		{
-			"mesh-gateway with no partition",
-			"spiffe://1234.consul/gateway/mesh/dc/dc1",
-			&SpiffeIDMeshGateway{
+			Name: "basic server",
+			URI:  "spiffe://1234.consul/agent/server/dc/dc1",
+			Struct: &SpiffeIDServer{
+				Host:       "1234.consul",
+				Datacenter: "dc1",
+			},
+			ParseError: "",
+		},
+		{
+			Name: "mesh-gateway with no partition",
+			URI:  "spiffe://1234.consul/gateway/mesh/dc/dc1",
+			Struct: &SpiffeIDMeshGateway{
				Host:       "1234.consul",
				Partition:  "default",
				Datacenter: "dc1",
			},
-			"",
+			ParseError: "",
		},
		{
-			"mesh-gateway with partition",
-			"spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
-			&SpiffeIDMeshGateway{
+			Name: "mesh-gateway with partition",
+			URI:  "spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
+			Struct: &SpiffeIDMeshGateway{
				Host:       "1234.consul",
				Partition:  "bizdev",
				Datacenter: "dc1",
			},
-			"",
+			ParseError: "",
		},
		{
-			"service with URL-encoded values",
-			"spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
-			&SpiffeIDService{
+			Name: "service with URL-encoded values",
+			URI:  "spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
+			Struct: &SpiffeIDService{
				Host:       "1234.consul",
				Partition:  defaultEntMeta.PartitionOrDefault(),
				Namespace:  "foo/bar",
				Datacenter: "bar/baz",
				Service:    "baz/qux",
			},
-			"",
+			ParseError: "",
		},
		{
-			"service with URL-encoded values with partition",
-			"spiffe://1234.consul/ap/biz%2Fdev/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
-			&SpiffeIDService{
+			Name: "service with URL-encoded values with partition",
+			URI:  "spiffe://1234.consul/ap/biz%2Fdev/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
+			Struct: &SpiffeIDService{
				Host:       "1234.consul",
				Partition:  "biz/dev",
				Namespace:  "foo/bar",
				Datacenter: "bar/baz",
				Service:    "baz/qux",
			},
-			"",
+			ParseError: "",
		},
		{
-			"signing ID",
-			"spiffe://1234.consul",
-			&SpiffeIDSigning{
+			Name: "signing ID",
+			URI:  "spiffe://1234.consul",
+			Struct: &SpiffeIDSigning{
				ClusterID: "1234",
				Domain:    "consul",
			},
-			"",
+			ParseError: "",
		},
	}

@ -139,3 +148,12 @@ func TestParseCertURIFromString(t *testing.T) {
		})
	}
}
+
+func TestSpiffeIDServer_URI(t *testing.T) {
+	srv := &SpiffeIDServer{
+		Host:       "1234.consul",
+		Datacenter: "dc1",
+	}
+
+	require.Equal(t, "spiffe://1234.consul/agent/server/dc/dc1", srv.URI().String())
+}
@ -4,6 +4,8 @@ package autopilotevents
import (
	acl "github.com/hashicorp/consul/acl"
+	memdb "github.com/hashicorp/go-memdb"
+
	mock "github.com/stretchr/testify/mock"

	structs "github.com/hashicorp/consul/agent/structs"

@ -48,6 +50,36 @@ func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _
	return r0, r1, r2
}
+
+// NodeService provides a mock function with given fields: ws, nodeName, serviceID, entMeta, peerName
+func (_m *MockStateStore) NodeService(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error) {
+	ret := _m.Called(ws, nodeName, serviceID, entMeta, peerName)
+
+	var r0 uint64
+	if rf, ok := ret.Get(0).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) uint64); ok {
+		r0 = rf(ws, nodeName, serviceID, entMeta, peerName)
+	} else {
+		r0 = ret.Get(0).(uint64)
+	}
+
+	var r1 *structs.NodeService
+	if rf, ok := ret.Get(1).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) *structs.NodeService); ok {
+		r1 = rf(ws, nodeName, serviceID, entMeta, peerName)
+	} else {
+		if ret.Get(1) != nil {
+			r1 = ret.Get(1).(*structs.NodeService)
+		}
+	}
+
+	var r2 error
+	if rf, ok := ret.Get(2).(func(memdb.WatchSet, string, string, *acl.EnterpriseMeta, string) error); ok {
+		r2 = rf(ws, nodeName, serviceID, entMeta, peerName)
+	} else {
+		r2 = ret.Error(2)
+	}
+
+	return r0, r1, r2
+}

// NewMockStateStore creates a new instance of MockStateStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockStateStore(t testing.TB) *MockStateStore {
	mock := &MockStateStore{}
@ -4,9 +4,11 @@ import (
	"fmt"
	"net"
	"sort"
+	"strconv"
	"sync"
	"time"

+	"github.com/hashicorp/go-memdb"
	autopilot "github.com/hashicorp/raft-autopilot"

	"github.com/hashicorp/consul/acl"

@ -26,6 +28,7 @@ type ReadyServerInfo struct {
	ID              string
	Address         string
	TaggedAddresses map[string]string
+	ExtGRPCPort     int
	Version         string
}

@ -122,6 +125,7 @@ func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
//go:generate mockery --name StateStore --inpackage --filename mock_StateStore_test.go
type StateStore interface {
	GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
+	NodeService(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error)
}

//go:generate mockery --name Publisher --inpackage --filename mock_Publisher_test.go

@ -226,6 +230,7 @@ func (r *ReadyServersEventPublisher) autopilotStateToReadyServers(state *autopil
			Address:         host,
			Version:         srv.Server.Version,
			TaggedAddresses: r.getTaggedAddresses(srv),
+			ExtGRPCPort:     r.getGRPCPort(srv),
		})
	}
}

@ -254,7 +259,7 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta
	// code and reason about and having those addresses be updated within 30s is good enough.
	_, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
	if err != nil || node == nil {
-		// no catalog information means we should return a nil addres map
+		// no catalog information means we should return a nil address map
		return nil
	}

@ -276,6 +281,38 @@ func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerSta
	return addrs
}
+
+// getGRPCPort will get the external gRPC port for a Consul server.
+// Returns 0 if there is none assigned or if an error is encountered.
+func (r *ReadyServersEventPublisher) getGRPCPort(srv *autopilot.ServerState) int {
+	if r.GetStore == nil {
+		return 0
+	}
+
+	_, n, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
+	if err != nil || n == nil {
+		return 0
+	}
+
+	_, ns, err := r.GetStore().NodeService(
+		nil,
+		n.Node,
+		structs.ConsulServiceID,
+		structs.NodeEnterpriseMetaInDefaultPartition(),
+		structs.DefaultPeerKeyword,
+	)
+	if err != nil || ns == nil || ns.Meta == nil {
+		return 0
+	}
+	if str, ok := ns.Meta["grpc_port"]; ok {
+		grpcPort, err := strconv.Atoi(str)
+		if err == nil {
+			return grpcPort
+		}
+	}
+
+	return 0
+}

// newReadyServersEvent will create a stream.Event with the provided ready server info.
func (r *ReadyServersEventPublisher) newReadyServersEvent(servers EventPayloadReadyServers) stream.Event {
	now := time.Now()
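getGRPCPort only reports a port when the server's catalog registration of the consul service carries a grpc_port key in its service meta; a minimal sketch of the shape it expects (the port value is illustrative):

```go
package main

import "github.com/hashicorp/consul/agent/structs"

func main() {
	// A Consul server's catalog entry for the "consul" service; when Meta
	// carries "grpc_port", autopilotevents surfaces it as ExtGRPCPort.
	ns := &structs.NodeService{
		ID:      structs.ConsulServiceID,
		Service: structs.ConsulServiceName,
		Meta:    map[string]string{"grpc_port": "8502"},
	}
	_ = ns
}
```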
@ -4,6 +4,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
time "time"
|
time "time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-memdb"
|
||||||
"github.com/hashicorp/raft"
|
"github.com/hashicorp/raft"
|
||||||
autopilot "github.com/hashicorp/raft-autopilot"
|
autopilot "github.com/hashicorp/raft-autopilot"
|
||||||
mock "github.com/stretchr/testify/mock"
|
mock "github.com/stretchr/testify/mock"
|
||||||
|
@ -164,9 +165,21 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
|
||||||
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
||||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
structs.DefaultPeerKeyword,
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-1", TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-1",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
).Once().Return(
|
).Once().Return(
|
||||||
uint64(0),
|
uint64(0),
|
||||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
|
nil,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -174,9 +187,21 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
|
||||||
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
||||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
structs.DefaultPeerKeyword,
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-2", TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-2",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
).Once().Return(
|
).Once().Return(
|
||||||
uint64(0),
|
uint64(0),
|
||||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
|
nil,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -184,9 +209,119 @@ func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
|
||||||
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
||||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
structs.DefaultPeerKeyword,
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-3", TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-3",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
).Once().Return(
|
).Once().Return(
|
||||||
uint64(0),
|
uint64(0),
|
||||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
|
nil,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
r := NewReadyServersEventPublisher(Config{
|
||||||
|
GetStore: func() StateStore { return store },
|
||||||
|
})
|
||||||
|
|
||||||
|
actual := r.autopilotStateToReadyServers(exampleState)
|
||||||
|
require.ElementsMatch(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutopilotStateToReadyServersWithExtGRPCPort(t *testing.T) {
|
||||||
|
expected := EventPayloadReadyServers{
|
||||||
|
{
|
||||||
|
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||||
|
Address: "198.18.0.2",
|
||||||
|
ExtGRPCPort: 1234,
|
||||||
|
Version: "v1.12.0",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||||
|
Address: "198.18.0.3",
|
||||||
|
ExtGRPCPort: 2345,
|
||||||
|
Version: "v1.12.0",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||||
|
Address: "198.18.0.4",
|
||||||
|
ExtGRPCPort: 3456,
|
||||||
|
Version: "v1.12.0",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
store := &MockStateStore{}
|
||||||
|
t.Cleanup(func() { store.AssertExpectations(t) })
|
||||||
|
store.On("GetNodeID",
|
||||||
|
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-1"},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-1",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
|
).Once().Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.NodeService{Meta: map[string]string{"grpc_port": "1234"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("GetNodeID",
|
||||||
|
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-2"},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-2",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
|
).Once().Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.NodeService{Meta: map[string]string{"grpc_port": "2345"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("GetNodeID",
|
||||||
|
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-3"},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-3",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
|
).Once().Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.NodeService{Meta: map[string]string{"grpc_port": "3456"}},
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -493,9 +628,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
|
||||||
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
||||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
structs.DefaultPeerKeyword,
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-1", TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-1",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
).Once().Return(
|
).Once().Return(
|
||||||
uint64(0),
|
uint64(0),
|
||||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
|
nil,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -503,9 +650,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
|
||||||
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
||||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
structs.DefaultPeerKeyword,
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-2", TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-2",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
).Once().Return(
|
).Once().Return(
|
||||||
uint64(0),
|
uint64(0),
|
||||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
|
nil,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -513,9 +672,21 @@ func TestReadyServerEventsSnapshotHandler(t *testing.T) {
|
||||||
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
||||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
structs.DefaultPeerKeyword,
|
structs.DefaultPeerKeyword,
|
||||||
|
).Times(2).Return(
|
||||||
|
uint64(0),
|
||||||
|
&structs.Node{Node: "node-3", TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
store.On("NodeService",
|
||||||
|
memdb.WatchSet(nil),
|
||||||
|
"node-3",
|
||||||
|
structs.ConsulServiceID,
|
||||||
|
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||||
|
structs.DefaultPeerKeyword,
|
||||||
).Once().Return(
|
).Once().Return(
|
||||||
uint64(0),
|
uint64(0),
|
||||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
|
nil,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@@ -12,6 +12,7 @@ import (
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
)

@@ -510,7 +511,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
ranOnce = true
}

thisReply, err := computeResolvedServiceConfig(
thisReply, err := configentry.ComputeResolvedServiceConfig(
args,
upstreamIDs,
legacyUpstreams,
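The resolution helper now lives in the configentry package. A rough sketch of a call site follows, assuming the exported function keeps the parameter list of the local helper that is deleted later in this diff (request, upstream IDs, legacy flag, resolved entry set, logger); the service name and values are illustrative only.

```go
// Sketch only: configentry.ComputeResolvedServiceConfig is assumed to mirror
// the removed computeResolvedServiceConfig signature shown further below.
func resolveWebDefaults(entries *configentry.ResolvedServiceConfigSet) (*structs.ServiceConfigResponse, error) {
	req := &structs.ServiceConfigRequest{Name: "web"} // hypothetical service name
	return configentry.ComputeResolvedServiceConfig(
		req,
		nil,   // no explicitly registered upstreams in this example
		false, // legacyUpstreams
		entries,
		hclog.NewNullLogger(),
	)
}
```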
@@ -1401,6 +1401,7 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
PassiveHealthCheck: &structs.PassiveHealthCheck{
Interval: 10,
MaxFailures: 2,
EnforcingConsecutive5xx: uintPointer(60),
},
},
Overrides: []*structs.UpstreamConfig{

@@ -1434,6 +1435,7 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"EnforcingConsecutive5xx": int64(60),
},
"mesh_gateway": map[string]interface{}{
"Mode": "remote",

@@ -1447,6 +1449,7 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"EnforcingConsecutive5xx": int64(60),
},
"mesh_gateway": map[string]interface{}{
"Mode": "local",

@@ -2507,3 +2510,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) {
})
}
}

func uintPointer(v uint32) *uint32 {
return &v
}
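The upstream configuration in this test now carries the new EnforcingConsecutive5xx knob next to Interval and MaxFailures. A minimal sketch of an override built the same way; the upstream name is a placeholder and the pointer type is inferred from the uintPointer helper added above.

```go
// Illustrative upstream override; field names mirror the test case above.
enforcing := uint32(60)
override := &structs.UpstreamConfig{
	Name: "redis", // hypothetical upstream
	PassiveHealthCheck: &structs.PassiveHealthCheck{
		Interval:                10,
		MaxFailures:             2,
		EnforcingConsecutive5xx: &enforcing, // *uint32, per the uintPointer helper
	},
}
_ = override
```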
@@ -39,6 +39,7 @@ func TestCompile(t *testing.T) {
"service redirect": testcase_ServiceRedirect(),
"service and subset redirect": testcase_ServiceAndSubsetRedirect(),
"datacenter redirect": testcase_DatacenterRedirect(),
"redirect to cluster peer": testcase_PeerRedirect(),
"datacenter redirect with mesh gateways": testcase_DatacenterRedirect_WithMeshGateways(),
"service failover": testcase_ServiceFailover(),
"service failover through redirect": testcase_ServiceFailoverThroughRedirect(),

@@ -1084,6 +1085,47 @@ func testcase_DatacenterRedirect() compileTestCase {
return compileTestCase{entries: entries, expect: expect}
}

func testcase_PeerRedirect() compileTestCase {
entries := newEntries()
entries.AddResolvers(
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
Peer: "cluster-01",
},
},
)

expect := &structs.CompiledDiscoveryChain{
Protocol: "tcp",
StartNode: "resolver:other.default.default.external.cluster-01",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:other.default.default.external.cluster-01": {
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "other.default.default.external.cluster-01",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "other.default.default.external.cluster-01",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
Service: "other",
Peer: "cluster-01",
}, func(t *structs.DiscoveryTarget) {
t.SNI = ""
t.Name = ""
t.Datacenter = ""
}),
},
}
return compileTestCase{entries: entries, expect: expect}
}

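testcase_PeerRedirect covers resolvers that redirect to a service exported by a cluster peer. A minimal sketch of such a config entry on its own, using only the fields exercised above; the service and peer names are placeholders.

```go
// Redirect "main" to the "other" service imported from peer "cluster-01".
resolver := &structs.ServiceResolverConfigEntry{
	Kind: "service-resolver",
	Name: "main",
	Redirect: &structs.ServiceResolverRedirect{
		Service: "other",
		Peer:    "cluster-01",
	},
}
_ = resolver
```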
func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
entries := newEntries()
entries.AddProxyDefaults(&structs.ProxyConfigEntry{
@@ -153,14 +153,33 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
// we don't support calling this endpoint for a specific peer
if args.PeerName != "" {
return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
}

// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
var maxIndex uint64

// If PeerName is not empty, we return only the imported services from that peer
if args.PeerName != "" {
// get a local dump for services
index, nodes, err := state.ServiceDump(ws,
args.ServiceKind,
args.UseServiceKind,
// Note we fetch imported services with wildcard namespace because imported services' namespaces
// are in a different locality; regardless of our local namespace, we return all imported services
// of the local partition.
args.EnterpriseMeta.WithWildcardNamespace(),
args.PeerName)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", args.PeerName, err)
}

if index > maxIndex {
maxIndex = index
}
reply.Index = maxIndex
reply.ImportedNodes = nodes

} else {
// otherwise return both local and all imported services

// get a local dump for services
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {

@@ -183,7 +202,10 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
}

for _, p := range listedPeerings {
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
// Note we fetch imported services with wildcard namespace because imported services' namespaces
// are in a different locality; regardless of our local namespace, we return all imported services
// of the local partition.
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, args.EnterpriseMeta.WithWildcardNamespace(), p.Name)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
}

@@ -211,6 +233,7 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
return fmt.Errorf("could not filter local service dump: %w", err)
}
reply.Nodes = raw.(structs.CheckServiceNodes)
}

importedRaw, err := filter.Execute(reply.ImportedNodes)
if err != nil {
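ServiceDump now honors a peer name on the request and returns only that peer's imported services. A small sketch of request arguments that would take the new branch; the field names come from the handler above, while the Datacenter field is assumed to exist as it does on other RPC argument structs.

```go
// Illustrative arguments for Internal.ServiceDump targeting one peer.
args := &structs.ServiceDumpRequest{
	Datacenter: "dc1",        // assumed standard RPC field
	PeerName:   "cluster-01", // non-empty: return only services imported from this peer
}
_ = args
```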
@@ -49,7 +49,7 @@ func kvsPreApply(logger hclog.Logger, srv *Server, authz resolver.Result, op api
return false, err
}

case api.KVGet, api.KVGetTree:
case api.KVGet, api.KVGetTree, api.KVGetOrEmpty:
// Filtering for GETs is done on the output side.

case api.KVCheckSession, api.KVCheckIndex:
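The ACL pre-check now treats the new get-or-empty transaction verb like the other KV reads. A minimal client-side sketch of issuing that verb through the Go API package; it assumes a local agent at the default address and only relies on the `api.KVGetOrEmpty` constant referenced above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// get-or-empty reads a key without failing the whole transaction when the
	// key is absent (assumption based on the verb's name; exact semantics live
	// in the txn endpoint).
	ops := api.TxnOps{
		{KV: &api.KVTxnOp{Verb: api.KVGetOrEmpty, Key: "config/feature-flag"}},
	}

	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed:", ok, "results:", len(resp.Results))
}
```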
@@ -1098,11 +1098,36 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error {
return fmt.Errorf("error parsing leaf signing cert: %w", err)
}

if err := pruneExpiredIntermediates(caRoot); err != nil {
return err
}

caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem)
caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId)
return nil
}

// pruneExpiredIntermediates removes expired intermediate certificates
// from the given CARoot.
func pruneExpiredIntermediates(caRoot *structs.CARoot) error {
var newIntermediates []string
now := time.Now()
for _, intermediatePEM := range caRoot.IntermediateCerts {
cert, err := connect.ParseCert(intermediatePEM)
if err != nil {
return fmt.Errorf("error parsing leaf signing cert: %w", err)
}

// Only keep the intermediate cert if it's still valid.
if cert.NotAfter.After(now) {
newIntermediates = append(newIntermediates, intermediatePEM)
}
}

caRoot.IntermediateCerts = newIntermediates
return nil
}
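The pruning helper simply drops any intermediate whose NotAfter is in the past. The same idea with only the standard library, for readers outside the Consul tree; the PEM handling and names here are illustrative, not the repo's own helpers.

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// pruneExpiredPEM keeps only certificates that are still valid at "now".
// It mirrors the filtering logic above using crypto/x509 directly.
func pruneExpiredPEM(pems []string, now time.Time) ([]string, error) {
	var kept []string
	for _, raw := range pems {
		block, _ := pem.Decode([]byte(raw))
		if block == nil {
			return nil, fmt.Errorf("invalid PEM block")
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, fmt.Errorf("parsing certificate: %w", err)
		}
		if cert.NotAfter.After(now) {
			kept = append(kept, raw)
		}
	}
	return kept, nil
}

func main() {
	kept, err := pruneExpiredPEM(nil, time.Now())
	fmt.Println(len(kept), err)
}
```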

// runRenewIntermediate periodically attempts to renew the intermediate cert.
func (c *CAManager) runRenewIntermediate(ctx context.Context) error {
isPrimary := c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter
@@ -1426,6 +1451,19 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
case *connect.SpiffeIDServer:
// The authorizer passed in should have unlimited permissions.
if err := allow.ACLWriteAllowed(&authzContext); err != nil {
return nil, err
}

// Verify that the DC in the URI matches us.
// The request must have been issued by a local server.
dc := c.serverConf.Datacenter
if v.Datacenter != dc {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
default:
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID")
}

@@ -1447,9 +1485,11 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
if err != nil {
return nil, err
}

signingID := connect.SpiffeIDSigningForCluster(config.ClusterID)
serviceID, isService := spiffeID.(*connect.SpiffeIDService)
agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent)
serverID, isServer := spiffeID.(*connect.SpiffeIDServer)
mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway)

var entMeta acl.EnterpriseMeta

@@ -1468,6 +1508,12 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
}
entMeta.Merge(mgwID.GetEnterpriseMeta())

case isServer:
if !signingID.CanSign(spiffeID) {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
"we are %s", serverID.Host, signingID.Host())
}
entMeta.Normalize()
case isAgent:
// isAgent - if we support more ID types then this would need to be an else if
// here we are just automatically fixing the trust domain. For auto-encrypt and

@@ -1494,7 +1540,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
entMeta.Merge(agentID.GetEnterpriseMeta())

default:
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, or mesh gateway ID")
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, server, or mesh gateway ID")
}

commonCfg, err := config.GetCommonConfig()

@@ -1583,6 +1629,8 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
case isAgent:
reply.Agent = agentID.Agent
reply.AgentURI = cert.URIs[0].String()
case isServer:
reply.ServerURI = cert.URIs[0].String()
default:
return nil, errors.New("not possible")
}
@@ -435,7 +435,6 @@ func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
errorMsg string
}{
{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
// a cert that is not yet valid is ok, assume it will be valid soon enough
{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},

@@ -1043,3 +1042,43 @@ func setupPrimaryCA(t *testing.T, client *vaultapi.Client, path string, rootPEM
require.NoError(t, err, "failed to set signed intermediate")
return lib.EnsureTrailingNewline(buf.String())
}

func TestCAManager_Sign_SpiffeIDServer(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}

_, s1 := testServerWithConfig(t)
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

codec := rpcClient(t, s1)
roots := structs.IndexedCARoots{}

retry.Run(t, func(r *retry.R) {
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(r, err)
require.Len(r, roots.Roots, 1)
})

pk, _, err := connect.GeneratePrivateKey()
require.NoError(t, err)

// Request a leaf certificate for a server.
spiffeID := &connect.SpiffeIDServer{
Host: roots.TrustDomain,
Datacenter: "dc1",
}
csr, err := connect.CreateCSR(spiffeID, pk, nil, nil)
require.NoError(t, err)

req := structs.CASignRequest{CSR: csr}
cert := structs.IssuedCert{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)

// Verify the chain of trust.
verifyLeafCert(t, roots.Roots[0], cert.CertPEM)

// Verify the Server's URI.
require.Equal(t, fmt.Sprintf("spiffe://%s/agent/server/dc/dc1", roots.TrustDomain), cert.ServerURI)
}

@@ -401,6 +401,18 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)

// Wait for the primary's old intermediate to be pruned after expiring.
oldIntermediate := activeRoot.IntermediateCerts[0]
retry.Run(t, func(r *retry.R) {
store := s1.caManager.delegate.State()
_, storedRoot, err := store.CARootActive(nil)
r.Check(err)

if storedRoot.IntermediateCerts[0] == oldIntermediate {
r.Fatal("old intermediate should be gone")
}
})
}

func patchIntermediateCertRenewInterval(t *testing.T) {

@@ -516,6 +528,18 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)

// Wait for dc2's old intermediate to be pruned after expiring.
oldIntermediate := activeRoot.IntermediateCerts[0]
retry.Run(t, func(r *retry.R) {
store := s2.caManager.delegate.State()
_, storedRoot, err := store.CARootActive(nil)
r.Check(err)

if storedRoot.IntermediateCerts[0] == oldIntermediate {
r.Fatal("old intermediate should be gone")
}
})
}

func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
@@ -112,7 +112,7 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
if status.NeverConnected {
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
} else {
healthy := status.IsHealthy()
healthy := s.peerStreamServer.Tracker.IsHealthy(status)
healthyInt := 0
if healthy {
healthyInt = 1

@@ -305,7 +305,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me

logger.Trace("establishing stream to peer")

streamStatus, err := s.peerStreamTracker.Register(peer.ID)
streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}

@@ -40,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
})
}

func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")

@@ -139,6 +140,8 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
deleted := &pbpeering.Peering{
ID: p.Peering.ID,
Name: "my-peer-acceptor",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: p.Peering.PeerServerAddresses,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))

@@ -262,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
deleted := &pbpeering.Peering{
ID: p.Peering.PeerID,
Name: "my-peer-dialer",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}

@@ -431,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))

@@ -1165,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))

@@ -1216,7 +1222,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
}))

require.Never(t, func() bool {
_, found := s1.peerStreamTracker.StreamStatus(peerID)
_, found := s1.peerStreamServer.StreamStatus(peerID)
return found
}, 7*time.Second, 1*time.Second, "peering should not have been established")
}
@@ -3,13 +3,14 @@ package consul
import (
"fmt"

"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/imdario/mergo"
"github.com/mitchellh/copystructure"

"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
)

// mergeNodeServiceWithCentralConfig merges a service instance (NodeService) with the

@@ -66,7 +67,7 @@ func mergeNodeServiceWithCentralConfig(
ns.ID, err)
}

defaults, err := computeResolvedServiceConfig(
defaults, err := configentry.ComputeResolvedServiceConfig(
configReq,
upstreams,
false,
@@ -87,225 +88,6 @@ func mergeNodeServiceWithCentralConfig(
|
||||||
return cfgIndex, mergedns, nil
|
return cfgIndex, mergedns, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func computeResolvedServiceConfig(
|
|
||||||
args *structs.ServiceConfigRequest,
|
|
||||||
upstreamIDs []structs.ServiceID,
|
|
||||||
legacyUpstreams bool,
|
|
||||||
entries *configentry.ResolvedServiceConfigSet,
|
|
||||||
logger hclog.Logger,
|
|
||||||
) (*structs.ServiceConfigResponse, error) {
|
|
||||||
var thisReply structs.ServiceConfigResponse
|
|
||||||
|
|
||||||
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
|
||||||
|
|
||||||
// TODO(freddy) Refactor this into smaller set of state store functions
|
|
||||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
|
|
||||||
// blocking query, this function will be rerun and these state store lookups will both be current.
|
|
||||||
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
|
|
||||||
var proxyConfGlobalProtocol string
|
|
||||||
proxyConf := entries.GetProxyDefaults(args.PartitionOrDefault())
|
|
||||||
if proxyConf != nil {
|
|
||||||
// Apply the proxy defaults to the sidecar's proxy config
|
|
||||||
mapCopy, err := copystructure.Copy(proxyConf.Config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to copy global proxy-defaults: %v", err)
|
|
||||||
}
|
|
||||||
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
|
|
||||||
thisReply.Mode = proxyConf.Mode
|
|
||||||
thisReply.TransparentProxy = proxyConf.TransparentProxy
|
|
||||||
thisReply.MeshGateway = proxyConf.MeshGateway
|
|
||||||
thisReply.Expose = proxyConf.Expose
|
|
||||||
|
|
||||||
// Extract the global protocol from proxyConf for upstream configs.
|
|
||||||
rawProtocol := proxyConf.Config["protocol"]
|
|
||||||
if rawProtocol != nil {
|
|
||||||
var ok bool
|
|
||||||
proxyConfGlobalProtocol, ok = rawProtocol.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("invalid protocol type %T", rawProtocol)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
serviceConf := entries.GetServiceDefaults(
|
|
||||||
structs.NewServiceID(args.Name, &args.EnterpriseMeta),
|
|
||||||
)
|
|
||||||
if serviceConf != nil {
|
|
||||||
if serviceConf.Expose.Checks {
|
|
||||||
thisReply.Expose.Checks = true
|
|
||||||
}
|
|
||||||
if len(serviceConf.Expose.Paths) >= 1 {
|
|
||||||
thisReply.Expose.Paths = serviceConf.Expose.Paths
|
|
||||||
}
|
|
||||||
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
|
|
||||||
thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
|
|
||||||
}
|
|
||||||
if serviceConf.Protocol != "" {
|
|
||||||
if thisReply.ProxyConfig == nil {
|
|
||||||
thisReply.ProxyConfig = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
|
|
||||||
}
|
|
||||||
if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
|
|
||||||
thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
|
|
||||||
}
|
|
||||||
if serviceConf.TransparentProxy.DialedDirectly {
|
|
||||||
thisReply.TransparentProxy.DialedDirectly = serviceConf.TransparentProxy.DialedDirectly
|
|
||||||
}
|
|
||||||
if serviceConf.Mode != structs.ProxyModeDefault {
|
|
||||||
thisReply.Mode = serviceConf.Mode
|
|
||||||
}
|
|
||||||
if serviceConf.Destination != nil {
|
|
||||||
thisReply.Destination = *serviceConf.Destination
|
|
||||||
}
|
|
||||||
|
|
||||||
if serviceConf.MaxInboundConnections > 0 {
|
|
||||||
if thisReply.ProxyConfig == nil {
|
|
||||||
thisReply.ProxyConfig = map[string]interface{}{}
|
|
||||||
}
|
|
||||||
thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
|
|
||||||
}
|
|
||||||
|
|
||||||
thisReply.Meta = serviceConf.Meta
|
|
||||||
}
|
|
||||||
|
|
||||||
// First collect all upstreams into a set of seen upstreams.
|
|
||||||
// Upstreams can come from:
|
|
||||||
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
|
|
||||||
// - Implicitly from centralized upstream config in service-defaults
|
|
||||||
seenUpstreams := map[structs.ServiceID]struct{}{}
|
|
||||||
|
|
||||||
var (
|
|
||||||
noUpstreamArgs = len(upstreamIDs) == 0 && len(args.Upstreams) == 0
|
|
||||||
|
|
||||||
// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
|
|
||||||
// will never be transparent because the service config request does not use the resolved value.
|
|
||||||
tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
|
|
||||||
)
|
|
||||||
|
|
||||||
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
|
|
||||||
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
|
|
||||||
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
|
|
||||||
if noUpstreamArgs && !tproxy {
|
|
||||||
return &thisReply, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// First store all upstreams that were provided in the request
|
|
||||||
for _, sid := range upstreamIDs {
|
|
||||||
if _, ok := seenUpstreams[sid]; !ok {
|
|
||||||
seenUpstreams[sid] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then store upstreams inferred from service-defaults and mapify the overrides.
|
|
||||||
var (
|
|
||||||
upstreamConfigs = make(map[structs.ServiceID]*structs.UpstreamConfig)
|
|
||||||
upstreamDefaults *structs.UpstreamConfig
|
|
||||||
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
|
|
||||||
usConfigs = make(map[structs.ServiceID]map[string]interface{})
|
|
||||||
)
|
|
||||||
if serviceConf != nil && serviceConf.UpstreamConfig != nil {
|
|
||||||
for i, override := range serviceConf.UpstreamConfig.Overrides {
|
|
||||||
if override.Name == "" {
|
|
||||||
logger.Warn(
|
|
||||||
"Skipping UpstreamConfig.Overrides entry without a required name field",
|
|
||||||
"entryIndex", i,
|
|
||||||
"kind", serviceConf.GetKind(),
|
|
||||||
"name", serviceConf.GetName(),
|
|
||||||
"namespace", serviceConf.GetEnterpriseMeta().NamespaceOrEmpty(),
|
|
||||||
)
|
|
||||||
continue // skip this impossible condition
|
|
||||||
}
|
|
||||||
seenUpstreams[override.ServiceID()] = struct{}{}
|
|
||||||
upstreamConfigs[override.ServiceID()] = override
|
|
||||||
}
|
|
||||||
if serviceConf.UpstreamConfig.Defaults != nil {
|
|
||||||
upstreamDefaults = serviceConf.UpstreamConfig.Defaults
|
|
||||||
|
|
||||||
// Store the upstream defaults under a wildcard key so that they can be applied to
|
|
||||||
// upstreams that are inferred from intentions and do not have explicit upstream configuration.
|
|
||||||
cfgMap := make(map[string]interface{})
|
|
||||||
upstreamDefaults.MergeInto(cfgMap)
|
|
||||||
|
|
||||||
wildcard := structs.NewServiceID(structs.WildcardSpecifier, args.WithWildcardNamespace())
|
|
||||||
usConfigs[wildcard] = cfgMap
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for upstream := range seenUpstreams {
|
|
||||||
resolvedCfg := make(map[string]interface{})
|
|
||||||
|
|
||||||
// The protocol of an upstream is resolved in this order:
|
|
||||||
// 1. Default protocol from proxy-defaults (how all services should be addressed)
|
|
||||||
// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
|
|
||||||
// 3. Protocol defined for the upstream in the service-defaults.(upstream_config.defaults|upstream_config.overrides) of the downstream
|
|
||||||
// (how the downstream wants to address it)
|
|
||||||
protocol := proxyConfGlobalProtocol
|
|
||||||
|
|
||||||
upstreamSvcDefaults := entries.GetServiceDefaults(
|
|
||||||
structs.NewServiceID(upstream.ID, &upstream.EnterpriseMeta),
|
|
||||||
)
|
|
||||||
if upstreamSvcDefaults != nil {
|
|
||||||
if upstreamSvcDefaults.Protocol != "" {
|
|
||||||
protocol = upstreamSvcDefaults.Protocol
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if protocol != "" {
|
|
||||||
resolvedCfg["protocol"] = protocol
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge centralized defaults for all upstreams before configuration for specific upstreams
|
|
||||||
if upstreamDefaults != nil {
|
|
||||||
upstreamDefaults.MergeInto(resolvedCfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
|
|
||||||
// because it is specific to the proxy instance.
|
|
||||||
//
|
|
||||||
// The goal is to flatten the mesh gateway mode in this order:
|
|
||||||
// 0. Value from centralized upstream_defaults
|
|
||||||
// 1. Value from local proxy registration
|
|
||||||
// 2. Value from centralized upstream_config
|
|
||||||
// 3. Value from local upstream definition. This last step is done in the client's service manager.
|
|
||||||
if !args.MeshGateway.IsZero() {
|
|
||||||
resolvedCfg["mesh_gateway"] = args.MeshGateway
|
|
||||||
}
|
|
||||||
|
|
||||||
if upstreamConfigs[upstream] != nil {
|
|
||||||
upstreamConfigs[upstream].MergeInto(resolvedCfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(resolvedCfg) > 0 {
|
|
||||||
usConfigs[upstream] = resolvedCfg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// don't allocate the slices just to not fill them
|
|
||||||
if len(usConfigs) == 0 {
|
|
||||||
return &thisReply, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if legacyUpstreams {
|
|
||||||
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
|
|
||||||
thisReply.UpstreamConfigs = make(map[string]map[string]interface{})
|
|
||||||
|
|
||||||
for us, conf := range usConfigs {
|
|
||||||
thisReply.UpstreamConfigs[us.ID] = conf
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
|
|
||||||
|
|
||||||
for us, conf := range usConfigs {
|
|
||||||
thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
|
|
||||||
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &thisReply, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeServiceConfig merges the service into defaults to produce the final effective
|
// MergeServiceConfig merges the service into defaults to produce the final effective
|
||||||
// config for the specified service.
|
// config for the specified service.
|
||||||
func MergeServiceConfig(defaults *structs.ServiceConfigResponse, service *structs.NodeService) (*structs.NodeService, error) {
|
func MergeServiceConfig(defaults *structs.ServiceConfigResponse, service *structs.NodeService) (*structs.NodeService, error) {
|
||||||
|
|
|
@@ -3,60 +3,13 @@ package consul
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/hashicorp/consul/agent/configentry"
|
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
|
||||||
"github.com/mitchellh/copystructure"
|
"github.com/mitchellh/copystructure"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_ComputeResolvedServiceConfig(t *testing.T) {
|
|
||||||
type args struct {
|
|
||||||
scReq *structs.ServiceConfigRequest
|
|
||||||
upstreamIDs []structs.ServiceID
|
|
||||||
entries *configentry.ResolvedServiceConfigSet
|
|
||||||
}
|
|
||||||
|
|
||||||
sid := structs.ServiceID{
|
|
||||||
ID: "sid",
|
|
||||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
want *structs.ServiceConfigResponse
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "proxy with maxinboundsconnections",
|
|
||||||
args: args{
|
|
||||||
scReq: &structs.ServiceConfigRequest{
|
|
||||||
Name: "sid",
|
|
||||||
},
|
|
||||||
entries: &configentry.ResolvedServiceConfigSet{
|
|
||||||
ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
|
|
||||||
sid: {
|
|
||||||
MaxInboundConnections: 20,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
want: &structs.ServiceConfigResponse{
|
|
||||||
ProxyConfig: map[string]interface{}{
|
|
||||||
"max_inbound_connections": 20,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got, err := computeResolvedServiceConfig(tt.args.scReq, tt.args.upstreamIDs,
|
|
||||||
false, tt.args.entries, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, tt.want, got)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_MergeServiceConfig_TransparentProxy(t *testing.T) {
|
func Test_MergeServiceConfig_TransparentProxy(t *testing.T) {
|
||||||
type args struct {
|
type args struct {
|
||||||
defaults *structs.ServiceConfigResponse
|
defaults *structs.ServiceConfigResponse
|
||||||
|
|
|
@@ -370,9 +370,9 @@ type Server struct {

// peerStreamServer is a server used to handle peering streams from external clusters.
peerStreamServer *peerstream.Server

// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
peeringServer *peering.Server
peerStreamTracker *peerstream.Tracker

// embedded struct to hold all the enterprise specific data
EnterpriseServer

@@ -724,11 +724,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
Logger: logger.Named("grpc-api.server-discovery"),
}).Register(s.externalGRPCServer)

s.peerStreamTracker = peerstream.NewTracker()
s.peeringBackend = NewPeeringBackend(s)
s.peerStreamServer = peerstream.NewServer(peerstream.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
GetStore: func() peerstream.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.peerstream"),
ACLResolver: s.ACLResolver,

@@ -742,7 +740,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
return s.ForwardGRPC(s.grpcConnPool, info, fn)
},
})
s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout)
s.peerStreamServer.Register(s.externalGRPCServer)

// Initialize internal gRPC server.

@@ -791,7 +788,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler

p := peering.NewServer(peering.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
Tracker: s.peerStreamServer.Tracker,
Logger: deps.Logger.Named("grpc-api.peering"),
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
// Only forward the request if the dc in the request matches the server's datacenter.
@@ -535,6 +535,12 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
if req.Peering.Name == "" {
return errors.New("Missing Peering Name")
}
if req.Peering.State == pbpeering.PeeringState_DELETING && (req.Peering.DeletedAt == nil || structs.IsZeroProtoTime(req.Peering.DeletedAt)) {
return errors.New("Missing deletion time for peering in deleting state")
}
if req.Peering.DeletedAt != nil && !structs.IsZeroProtoTime(req.Peering.DeletedAt) && req.Peering.State != pbpeering.PeeringState_DELETING {
return fmt.Errorf("Unexpected state for peering with deletion time: %s", pbpeering.PeeringStateToAPI(req.Peering.State))
}

// Ensure the name is unique (cannot conflict with another peering with a different ID).
_, existing, err := peeringReadTxn(tx, nil, Query{

@@ -546,11 +552,32 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
}

if existing != nil {
if req.Peering.ShouldDial() != existing.ShouldDial() {
return fmt.Errorf("Cannot switch peering dialing mode from %t to %t", existing.ShouldDial(), req.Peering.ShouldDial())
}

if req.Peering.ID != existing.ID {
return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID)
}

// Nothing to do if our peer wants to terminate the peering but the peering is already marked for deletion.
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}

// No-op deletion
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_DELETING {
return nil
}

// No-op termination
if existing.State == pbpeering.PeeringState_TERMINATED && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}

// Prevent modifications to Peering marked for deletion.
if !existing.IsActive() {
// This blocks generating new peering tokens or re-establishing the peering until the peering is done deleting.
if existing.State == pbpeering.PeeringState_DELETING {
return fmt.Errorf("cannot write to peering that is marked for deletion")
}

@@ -582,8 +609,8 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
req.Peering.ModifyIndex = idx
}

// Ensure associated secrets are cleaned up when a peering is marked for deletion.
// Ensure associated secrets are cleaned up when a peering is marked for deletion or terminated.
if req.Peering.State == pbpeering.PeeringState_DELETING {
if !req.Peering.IsActive() {
if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil {
return fmt.Errorf("failed to delete peering secrets: %w", err)
}
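PeeringWrite now short-circuits a few state transitions rather than rejecting them. A small distillation of those rules as a standalone predicate, purely illustrative; the real checks live inline in the store method above.

```go
// isNoOpPeeringWrite reports whether a write can be silently ignored, per the
// rules added above: repeated deletions, repeated terminations, and a peer
// asking to terminate a peering that is already being deleted.
func isNoOpPeeringWrite(existing, requested pbpeering.PeeringState) bool {
	switch {
	case existing == pbpeering.PeeringState_DELETING && requested == pbpeering.PeeringState_TERMINATED:
		return true
	case existing == pbpeering.PeeringState_DELETING && requested == pbpeering.PeeringState_DELETING:
		return true
	case existing == pbpeering.PeeringState_TERMINATED && requested == pbpeering.PeeringState_TERMINATED:
		return true
	default:
		return false
	}
}
```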
@@ -950,6 +950,7 @@ func TestStore_Peering_Watch(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: testFooPeerID,
|
ID: testFooPeerID,
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
@@ -976,6 +977,7 @@ func TestStore_Peering_Watch(t *testing.T) {
|
||||||
err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{
|
err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{
|
||||||
ID: testBarPeerID,
|
ID: testBarPeerID,
|
||||||
Name: "bar",
|
Name: "bar",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
@@ -1077,6 +1079,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: testFooPeerID,
|
ID: testFooPeerID,
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
},
|
},
|
||||||
|
@@ -1112,16 +1115,22 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
// Each case depends on the previous.
|
// Each case depends on the previous.
|
||||||
s := NewStateStore(nil)
|
s := NewStateStore(nil)
|
||||||
|
|
||||||
|
testTime := time.Now()
|
||||||
|
|
||||||
|
type expectations struct {
|
||||||
|
peering *pbpeering.Peering
|
||||||
|
secrets *pbpeering.PeeringSecrets
|
||||||
|
err string
|
||||||
|
}
|
||||||
type testcase struct {
|
type testcase struct {
|
||||||
name string
|
name string
|
||||||
input *pbpeering.PeeringWriteRequest
|
input *pbpeering.PeeringWriteRequest
|
||||||
expectSecrets *pbpeering.PeeringSecrets
|
expect expectations
|
||||||
expectErr string
|
|
||||||
}
|
}
|
||||||
run := func(t *testing.T, tc testcase) {
|
run := func(t *testing.T, tc testcase) {
|
||||||
err := s.PeeringWrite(10, tc.input)
|
err := s.PeeringWrite(10, tc.input)
|
||||||
if tc.expectErr != "" {
|
if tc.expect.err != "" {
|
||||||
testutil.RequireErrorContains(t, err, tc.expectErr)
|
testutil.RequireErrorContains(t, err, tc.expect.err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@@ -1133,12 +1142,16 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
_, p, err := s.PeeringRead(nil, q)
|
_, p, err := s.PeeringRead(nil, q)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotNil(t, p)
|
require.NotNil(t, p)
|
||||||
require.Equal(t, tc.input.Peering.State, p.State)
|
require.Equal(t, tc.expect.peering.State, p.State)
|
||||||
require.Equal(t, tc.input.Peering.Name, p.Name)
|
require.Equal(t, tc.expect.peering.Name, p.Name)
|
||||||
|
require.Equal(t, tc.expect.peering.Meta, p.Meta)
|
||||||
|
if tc.expect.peering.DeletedAt != nil {
|
||||||
|
require.Equal(t, tc.expect.peering.DeletedAt, p.DeletedAt)
|
||||||
|
}
|
||||||
|
|
||||||
secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID)
|
secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
prototest.AssertDeepEqual(t, tc.expectSecrets, secrets)
|
prototest.AssertDeepEqual(t, tc.expect.secrets, secrets)
|
||||||
}
|
}
|
||||||
tcs := []testcase{
|
tcs := []testcase{
|
||||||
{
|
{
|
||||||
|
@@ -1147,24 +1160,64 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: testBazPeerID,
|
ID: testBazPeerID,
|
||||||
Name: "baz",
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_ESTABLISHING,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
},
|
},
|
||||||
SecretsRequest: &pbpeering.SecretsWriteRequest{
|
SecretsRequest: &pbpeering.SecretsWriteRequest{
|
||||||
PeerID: testBazPeerID,
|
PeerID: testBazPeerID,
|
||||||
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
|
Request: &pbpeering.SecretsWriteRequest_Establish{
|
||||||
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
|
Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
|
||||||
EstablishmentSecret: testBazSecretID,
|
ActiveStreamSecret: testBazSecretID,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectSecrets: &pbpeering.PeeringSecrets{
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_ESTABLISHING,
|
||||||
|
},
|
||||||
|
secrets: &pbpeering.PeeringSecrets{
|
||||||
PeerID: testBazPeerID,
|
PeerID: testBazPeerID,
|
||||||
Establishment: &pbpeering.PeeringSecrets_Establishment{
|
Stream: &pbpeering.PeeringSecrets_Stream{
|
||||||
SecretID: testBazSecretID,
|
ActiveSecretID: testBazSecretID,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cannot change ID for baz",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: "123",
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_FAILING,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
err: `A peering already exists with the name "baz" and a different ID`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cannot change dialer status for baz",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: "123",
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_FAILING,
|
||||||
|
// Excluding the peer server addresses leads to baz not being considered a dialer.
|
||||||
|
// PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
err: "Cannot switch peering dialing mode from true to false",
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "update baz",
|
name: "update baz",
|
||||||
input: &pbpeering.PeeringWriteRequest{
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
@ -1172,13 +1225,93 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
ID: testBazPeerID,
|
ID: testBazPeerID,
|
||||||
Name: "baz",
|
Name: "baz",
|
||||||
State: pbpeering.PeeringState_FAILING,
|
State: pbpeering.PeeringState_FAILING,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectSecrets: &pbpeering.PeeringSecrets{
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_FAILING,
|
||||||
|
},
|
||||||
|
secrets: &pbpeering.PeeringSecrets{
|
||||||
PeerID: testBazPeerID,
|
PeerID: testBazPeerID,
|
||||||
Establishment: &pbpeering.PeeringSecrets_Establishment{
|
Stream: &pbpeering.PeeringSecrets_Stream{
|
||||||
SecretID: testBazSecretID,
|
ActiveSecretID: testBazSecretID,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "if no state was included in request it is inherited from existing",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
// Send undefined state.
|
||||||
|
// State: pbpeering.PeeringState_FAILING,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
// Previous failing state is picked up.
|
||||||
|
State: pbpeering.PeeringState_FAILING,
|
||||||
|
},
|
||||||
|
secrets: &pbpeering.PeeringSecrets{
|
||||||
|
PeerID: testBazPeerID,
|
||||||
|
Stream: &pbpeering.PeeringSecrets_Stream{
|
||||||
|
ActiveSecretID: testBazSecretID,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "mark baz as terminated",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_TERMINATED,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_TERMINATED,
|
||||||
|
},
|
||||||
|
// Secrets for baz should have been deleted
|
||||||
|
secrets: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cannot modify peering during no-op termination",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_TERMINATED,
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
|
||||||
|
// Attempt to add metadata
|
||||||
|
Meta: map[string]string{"foo": "bar"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_TERMINATED,
|
||||||
|
// Meta should be unchanged.
|
||||||
|
Meta: nil,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -1189,12 +1322,66 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
ID: testBazPeerID,
|
ID: testBazPeerID,
|
||||||
Name: "baz",
|
Name: "baz",
|
||||||
State: pbpeering.PeeringState_DELETING,
|
State: pbpeering.PeeringState_DELETING,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
DeletedAt: structs.TimeToProto(testTime),
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
|
DeletedAt: structs.TimeToProto(testTime),
|
||||||
|
},
|
||||||
|
secrets: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "deleting a deleted peering is a no-op",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
// Still marked as deleting at the original testTime
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
|
DeletedAt: structs.TimeToProto(testTime),
|
||||||
|
},
|
||||||
// Secrets for baz should have been deleted
|
// Secrets for baz should have been deleted
|
||||||
expectSecrets: nil,
|
secrets: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "terminating a peering marked for deletion is a no-op",
|
||||||
|
input: &pbpeering.PeeringWriteRequest{
|
||||||
|
Peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
State: pbpeering.PeeringState_TERMINATED,
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: expectations{
|
||||||
|
peering: &pbpeering.Peering{
|
||||||
|
ID: testBazPeerID,
|
||||||
|
Name: "baz",
|
||||||
|
// Still marked as deleting
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
|
},
|
||||||
|
// Secrets for baz should have been deleted
|
||||||
|
secrets: nil,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "cannot update peering marked for deletion",
|
name: "cannot update peering marked for deletion",
|
||||||
|
@ -1202,14 +1389,18 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: testBazPeerID,
|
ID: testBazPeerID,
|
||||||
Name: "baz",
|
Name: "baz",
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
|
|
||||||
// Attempt to add metadata
|
// Attempt to add metadata
|
||||||
Meta: map[string]string{
|
Meta: map[string]string{
|
||||||
"source": "kubernetes",
|
"source": "kubernetes",
|
||||||
},
|
},
|
||||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectErr: "cannot write to peering that is marked for deletion",
|
expect: expectations{
|
||||||
|
err: "cannot write to peering that is marked for deletion",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "cannot create peering marked for deletion",
|
name: "cannot create peering marked for deletion",
|
||||||
|
@ -1217,11 +1408,15 @@ func TestStore_PeeringWrite(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: testFooPeerID,
|
ID: testFooPeerID,
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
|
PeerServerAddresses: []string{"localhost:8502"},
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectErr: "cannot create a new peering marked for deletion",
|
expect: expectations{
|
||||||
|
err: "cannot create a new peering marked for deletion",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, tc := range tcs {
|
for _, tc := range tcs {
|
||||||
|
@ -1246,6 +1441,7 @@ func TestStore_PeeringDelete(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: testFooPeerID,
|
ID: testFooPeerID,
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
},
|
},
|
||||||
}))
|
}))
|
||||||
|
@ -1759,6 +1955,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
|
||||||
copied := pbpeering.Peering{
|
copied := pbpeering.Peering{
|
||||||
ID: tp.peering.ID,
|
ID: tp.peering.ID,
|
||||||
Name: tp.peering.Name,
|
Name: tp.peering.Name,
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
}
|
}
|
||||||
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied}))
|
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied}))
|
||||||
|
@ -2201,6 +2398,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
|
||||||
Peering: &pbpeering.Peering{
|
Peering: &pbpeering.Peering{
|
||||||
ID: peerID1,
|
ID: peerID1,
|
||||||
Name: "peer1",
|
Name: "peer1",
|
||||||
|
State: pbpeering.PeeringState_DELETING,
|
||||||
DeletedAt: structs.TimeToProto(time.Now()),
|
DeletedAt: structs.TimeToProto(time.Now()),
|
||||||
},
|
},
|
||||||
}))
|
}))
|
||||||
|
|
|
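The test hunks above all enforce the same write-path convention: a peering only enters the DELETING state together with a DeletedAt timestamp, and later writes against a peering marked for deletion are rejected. A minimal sketch of the request shape these tests construct; the package name, index, and peering values are placeholders for illustration, not values taken from this change.

```go
package peeringexample // hypothetical package name, for illustration only

import (
	"time"

	"github.com/hashicorp/consul/agent/consul/state"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/proto/pbpeering"
)

// markPeeringDeleted mirrors the request shape used by the tests above when a
// peering is moved into the DELETING state: the state must be accompanied by a
// DeletedAt timestamp for the write to be accepted.
func markPeeringDeleted(s *state.Store, idx uint64, id, name string) error {
	return s.PeeringWrite(idx, &pbpeering.PeeringWriteRequest{
		Peering: &pbpeering.Peering{
			ID:        id,
			Name:      name,
			State:     pbpeering.PeeringState_DELETING,
			DeletedAt: structs.TimeToProto(time.Now()),
		},
	})
}
```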
@@ -60,6 +63,13 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx
 err = fmt.Errorf("key %q doesn't exist", op.DirEnt.Key)
 }
 
+case api.KVGetOrEmpty:
+_, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta)
+if entry == nil && err == nil {
+entry = &op.DirEnt
+entry.Value = nil
+}
+
 case api.KVGetTree:
 var entries structs.DirEntries
 _, entries, err = s.kvsListTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta)

@@ -95,7 +102,7 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx
 // value (we have to clone so we don't modify the entry being used by
 // the state store).
 if entry != nil {
-if op.Verb == api.KVGet {
+if op.Verb == api.KVGet || op.Verb == api.KVGetOrEmpty {
 result := structs.TxnResult{KV: entry}
 return structs.TxnResults{&result}, nil
 }

@@ -577,6 +577,22 @@ func TestStateStore_Txn_KVS(t *testing.T) {
 },
 },
 },
+&structs.TxnOp{
+KV: &structs.TxnKVOp{
+Verb: api.KVGetOrEmpty,
+DirEnt: structs.DirEntry{
+Key: "foo/update",
+},
+},
+},
+&structs.TxnOp{
+KV: &structs.TxnKVOp{
+Verb: api.KVGetOrEmpty,
+DirEnt: structs.DirEntry{
+Key: "foo/not-exists",
+},
+},
+},
 &structs.TxnOp{
 KV: &structs.TxnKVOp{
 Verb: api.KVCheckIndex,

@@ -702,6 +718,22 @@ func TestStateStore_Txn_KVS(t *testing.T) {
 },
 },
 },
+&structs.TxnResult{
+KV: &structs.DirEntry{
+Key: "foo/update",
+Value: []byte("stale"),
+RaftIndex: structs.RaftIndex{
+CreateIndex: 5,
+ModifyIndex: 5,
+},
+},
+},
+&structs.TxnResult{
+KV: &structs.DirEntry{
+Key: "foo/not-exists",
+Value: nil,
+},
+},
 &structs.TxnResult{
 KV: &structs.DirEntry{
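As the changelog entry notes, the new verb is also exposed through the transaction endpoint of the API client. A hedged sketch of how a caller might use it, assuming the `api.KVGetOrEmpty` constant introduced alongside this change; the keys and agent address are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a local agent at the default address; adjust for your setup.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ops := api.TxnOps{
		// Unlike KVGet, KVGetOrEmpty returns an empty result instead of
		// failing the whole transaction when the key does not exist.
		&api.TxnOp{KV: &api.KVTxnOp{Verb: api.KVGetOrEmpty, Key: "foo/maybe-missing"}},
		&api.TxnOp{KV: &api.KVTxnOp{Verb: api.KVSet, Key: "foo/update", Value: []byte("fresh")}},
	}

	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed:", ok, "results:", len(resp.Results))
}
```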
@@ -41,8 +41,8 @@ var Gauges = []prometheus.GaugeDefinition{
 Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.",
 },
 {
-Name: []string{"consul", "kv", "entries"},
-Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.10.3.",
+Name: []string{"consul", "state", "kv_entries"},
+Help: "Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.",
 },
 {
 Name: []string{"consul", "state", "connect_instances"},

@@ -5,12 +5,13 @@ import (
 "fmt"
 "strings"
 
-"github.com/hashicorp/go-hclog"
+"github.com/golang/protobuf/proto"
 "google.golang.org/genproto/googleapis/rpc/code"
 newproto "google.golang.org/protobuf/proto"
 "google.golang.org/protobuf/types/known/anypb"
 
 "github.com/hashicorp/consul/agent/cache"
+"github.com/hashicorp/consul/agent/consul/state"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/proto/pbpeering"
 "github.com/hashicorp/consul/proto/pbpeerstream"

@@ -35,7 +36,6 @@ import (
 // Each cache.UpdateEvent will contain all instances for a service name.
 // If there are no instances in the event, we consider that to be a de-registration.
 func makeServiceResponse(
-logger hclog.Logger,
 mst *MutableStatus,
 update cache.UpdateEvent,
 ) (*pbpeerstream.ReplicationMessage_Response, error) {

@@ -87,7 +87,6 @@ func makeServiceResponse(
 }
 
 func makeCARootsResponse(
-logger hclog.Logger,
 update cache.UpdateEvent,
 ) (*pbpeerstream.ReplicationMessage_Response, error) {
 any, _, err := marshalToProtoAny[*pbpeering.PeeringTrustBundle](update.Result)

@@ -105,6 +104,24 @@ func makeCARootsResponse(
 }, nil
 }
 
+func makeServerAddrsResponse(
+update cache.UpdateEvent,
+) (*pbpeerstream.ReplicationMessage_Response, error) {
+any, _, err := marshalToProtoAny[*pbpeering.PeeringServerAddresses](update.Result)
+if err != nil {
+return nil, fmt.Errorf("failed to marshal: %w", err)
+}
+
+return &pbpeerstream.ReplicationMessage_Response{
+ResourceURL: pbpeerstream.TypeURLPeeringServerAddresses,
+// TODO(peering): Nonce management
+Nonce: "",
+ResourceID: "server-addrs",
+Operation: pbpeerstream.Operation_OPERATION_UPSERT,
+Resource: any,
+}, nil
+}
+
 // marshalToProtoAny takes any input and returns:
 // the protobuf.Any type, the asserted T type, and any errors
 // during marshalling or type assertion.

@@ -127,7 +144,6 @@ func (s *Server) processResponse(
 partition string,
 mutableStatus *MutableStatus,
 resp *pbpeerstream.ReplicationMessage_Response,
-logger hclog.Logger,
 ) (*pbpeerstream.ReplicationMessage, error) {
 if !pbpeerstream.KnownTypeURL(resp.ResourceURL) {
 err := fmt.Errorf("received response for unknown resource type %q", resp.ResourceURL)

@@ -151,7 +167,7 @@ func (s *Server) processResponse(
 ), err
 }
 
-if err := s.handleUpsert(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, resp.Resource, logger); err != nil {
+if err := s.handleUpsert(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, resp.Resource); err != nil {
 return makeNACKReply(
 resp.ResourceURL,
 resp.Nonce,

@@ -163,7 +179,7 @@ func (s *Server) processResponse(
 return makeACKReply(resp.ResourceURL, resp.Nonce), nil
 
 case pbpeerstream.Operation_OPERATION_DELETE:
-if err := s.handleDelete(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID, logger); err != nil {
+if err := s.handleDelete(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID); err != nil {
 return makeNACKReply(
 resp.ResourceURL,
 resp.Nonce,

@@ -196,7 +212,6 @@ func (s *Server) handleUpsert(
 resourceURL string,
 resourceID string,
 resource *anypb.Any,
-logger hclog.Logger,
 ) error {
 if resource.TypeUrl != resourceURL {
 return fmt.Errorf("mismatched resourceURL %q and Any typeUrl %q", resourceURL, resource.TypeUrl)

@@ -229,12 +244,20 @@ func (s *Server) handleUpsert(
 
 return s.handleUpsertRoots(peerName, partition, roots)
 
+case pbpeerstream.TypeURLPeeringServerAddresses:
+addrs := &pbpeering.PeeringServerAddresses{}
+if err := resource.UnmarshalTo(addrs); err != nil {
+return fmt.Errorf("failed to unmarshal resource: %w", err)
+}
+
+return s.handleUpsertServerAddrs(peerName, partition, addrs)
 default:
 return fmt.Errorf("unexpected resourceURL: %s", resourceURL)
 }
 }
 
 // handleUpdateService handles both deletion and upsert events for a service.
+//
 // On an UPSERT event:
 // - All nodes, services, checks in the input pbNodes are re-applied through Raft.
 // - Any nodes, services, or checks in the catalog that were not in the input pbNodes get deleted.

@@ -449,13 +472,39 @@ func (s *Server) handleUpsertRoots(
 return s.Backend.PeeringTrustBundleWrite(req)
 }
 
+func (s *Server) handleUpsertServerAddrs(
+peerName string,
+partition string,
+addrs *pbpeering.PeeringServerAddresses,
+) error {
+q := state.Query{
+Value: peerName,
+EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(partition),
+}
+_, existing, err := s.GetStore().PeeringRead(nil, q)
+if err != nil {
+return fmt.Errorf("failed to read peering: %w", err)
+}
+if existing == nil || !existing.IsActive() {
+return fmt.Errorf("peering does not exist or has been marked for deletion")
+}
+
+// Clone to avoid mutating the existing data
+p := proto.Clone(existing).(*pbpeering.Peering)
+p.PeerServerAddresses = addrs.GetAddresses()
+
+req := &pbpeering.PeeringWriteRequest{
+Peering: p,
+}
+return s.Backend.PeeringWrite(req)
+}
+
 func (s *Server) handleDelete(
 peerName string,
 partition string,
 mutableStatus *MutableStatus,
 resourceURL string,
 resourceID string,
-logger hclog.Logger,
 ) error {
 switch resourceURL {
 case pbpeerstream.TypeURLExportedService:
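The server-address replication added above rides on the same `anypb.Any` envelope as the other resource types: the sending side packs a `pbpeering.PeeringServerAddresses` into an Any (as `makeServerAddrsResponse` does via `marshalToProtoAny`), and `handleUpsert` unpacks it with `UnmarshalTo` before rewriting the peering's `PeerServerAddresses`. A standalone sketch of that round trip; the address is a placeholder, and the `Addresses` field name is inferred from the `GetAddresses` accessor used in the change:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/types/known/anypb"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func main() {
	// Pack: roughly what the exporting side does before putting the resource on the stream.
	addrs := &pbpeering.PeeringServerAddresses{Addresses: []string{"10.0.0.1:8502"}}
	anyMsg, err := anypb.New(addrs)
	if err != nil {
		log.Fatal(err)
	}

	// Unpack: roughly what handleUpsert does after checking the resource's type URL.
	got := &pbpeering.PeeringServerAddresses{}
	if err := anyMsg.UnmarshalTo(got); err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.GetAddresses())
}
```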
@ -26,11 +26,12 @@ const (
|
||||||
|
|
||||||
type Server struct {
|
type Server struct {
|
||||||
Config
|
Config
|
||||||
|
|
||||||
|
Tracker *Tracker
|
||||||
}
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Backend Backend
|
Backend Backend
|
||||||
Tracker *Tracker
|
|
||||||
GetStore func() StateStore
|
GetStore func() StateStore
|
||||||
Logger hclog.Logger
|
Logger hclog.Logger
|
||||||
ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)
|
ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)
|
||||||
|
@ -42,8 +43,8 @@ type Config struct {
|
||||||
// outgoingHeartbeatInterval is how often we send a heartbeat.
|
// outgoingHeartbeatInterval is how often we send a heartbeat.
|
||||||
outgoingHeartbeatInterval time.Duration
|
outgoingHeartbeatInterval time.Duration
|
||||||
|
|
||||||
// IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
|
// incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
|
||||||
IncomingHeartbeatTimeout time.Duration
|
incomingHeartbeatTimeout time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate mockery --name ACLResolver --inpackage
|
//go:generate mockery --name ACLResolver --inpackage
|
||||||
|
@ -53,7 +54,6 @@ type ACLResolver interface {
|
||||||
|
|
||||||
func NewServer(cfg Config) *Server {
|
func NewServer(cfg Config) *Server {
|
||||||
requireNotNil(cfg.Backend, "Backend")
|
requireNotNil(cfg.Backend, "Backend")
|
||||||
requireNotNil(cfg.Tracker, "Tracker")
|
|
||||||
requireNotNil(cfg.GetStore, "GetStore")
|
requireNotNil(cfg.GetStore, "GetStore")
|
||||||
requireNotNil(cfg.Logger, "Logger")
|
requireNotNil(cfg.Logger, "Logger")
|
||||||
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
|
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
|
||||||
|
@ -63,11 +63,12 @@ func NewServer(cfg Config) *Server {
|
||||||
if cfg.outgoingHeartbeatInterval == 0 {
|
if cfg.outgoingHeartbeatInterval == 0 {
|
||||||
cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
|
cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
|
||||||
}
|
}
|
||||||
if cfg.IncomingHeartbeatTimeout == 0 {
|
if cfg.incomingHeartbeatTimeout == 0 {
|
||||||
cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
|
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
|
||||||
}
|
}
|
||||||
return &Server{
|
return &Server{
|
||||||
Config: cfg,
|
Config: cfg,
|
||||||
|
Tracker: NewTracker(cfg.incomingHeartbeatTimeout),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -104,6 +105,7 @@ type Backend interface {
|
||||||
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
|
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
|
||||||
CatalogRegister(req *structs.RegisterRequest) error
|
CatalogRegister(req *structs.RegisterRequest) error
|
||||||
CatalogDeregister(req *structs.DeregisterRequest) error
|
CatalogDeregister(req *structs.DeregisterRequest) error
|
||||||
|
PeeringWrite(req *pbpeering.PeeringWriteRequest) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// StateStore provides a read-only interface for querying Peering data.
|
// StateStore provides a read-only interface for querying Peering data.
|
||||||
|
|
|
@ -161,8 +161,22 @@ func (s *Server) StreamResources(stream pbpeerstream.PeerStreamService_StreamRes
|
||||||
if p == nil {
|
if p == nil {
|
||||||
return grpcstatus.Error(codes.InvalidArgument, "initial subscription for unknown PeerID: "+req.PeerID)
|
return grpcstatus.Error(codes.InvalidArgument, "initial subscription for unknown PeerID: "+req.PeerID)
|
||||||
}
|
}
|
||||||
|
if !p.IsActive() {
|
||||||
|
// If peering is terminated, then our peer sent the termination message.
|
||||||
|
// For other non-active states, send the termination message.
|
||||||
|
if p.State != pbpeering.PeeringState_TERMINATED {
|
||||||
|
term := &pbpeerstream.ReplicationMessage{
|
||||||
|
Payload: &pbpeerstream.ReplicationMessage_Terminated_{
|
||||||
|
Terminated: &pbpeerstream.ReplicationMessage_Terminated{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
logTraceSend(logger, term)
|
||||||
|
|
||||||
// TODO(peering): If the peering is marked as deleted, send a Terminated message and return
|
// we don't care if send fails; stream will be killed by termination message or grpc error
|
||||||
|
_ = stream.Send(term)
|
||||||
|
}
|
||||||
|
return grpcstatus.Error(codes.Aborted, "peering is marked as deleted: "+req.PeerID)
|
||||||
|
}
|
||||||
|
|
||||||
secrets, err := s.GetStore().PeeringSecretsRead(nil, req.PeerID)
|
secrets, err := s.GetStore().PeeringSecretsRead(nil, req.PeerID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -347,6 +361,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
for _, resourceURL := range []string{
|
for _, resourceURL := range []string{
|
||||||
pbpeerstream.TypeURLExportedService,
|
pbpeerstream.TypeURLExportedService,
|
||||||
pbpeerstream.TypeURLPeeringTrustBundle,
|
pbpeerstream.TypeURLPeeringTrustBundle,
|
||||||
|
pbpeerstream.TypeURLPeeringServerAddresses,
|
||||||
} {
|
} {
|
||||||
sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
|
sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
|
||||||
ResourceURL: resourceURL,
|
ResourceURL: resourceURL,
|
||||||
|
@ -406,7 +421,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
|
|
||||||
// incomingHeartbeatCtx will complete if incoming heartbeats time out.
|
// incomingHeartbeatCtx will complete if incoming heartbeats time out.
|
||||||
incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
|
incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
|
||||||
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
|
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
|
||||||
// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
|
// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
|
||||||
// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
|
// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
|
||||||
// value, not the current value.
|
// value, not the current value.
|
||||||
|
@ -544,14 +559,11 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
// At this point we have a valid ResourceURL and we are subscribed to it.
|
// At this point we have a valid ResourceURL and we are subscribed to it.
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case req.ResponseNonce == "" && req.Error != nil:
|
case req.Error == nil: // ACK
|
||||||
return grpcstatus.Error(codes.InvalidArgument, "initial subscription request for a resource type must not contain an error")
|
|
||||||
|
|
||||||
case req.ResponseNonce != "" && req.Error == nil: // ACK
|
|
||||||
// TODO(peering): handle ACK fully
|
// TODO(peering): handle ACK fully
|
||||||
status.TrackAck()
|
status.TrackAck()
|
||||||
|
|
||||||
case req.ResponseNonce != "" && req.Error != nil: // NACK
|
case req.Error != nil: // NACK
|
||||||
// TODO(peering): handle NACK fully
|
// TODO(peering): handle NACK fully
|
||||||
logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message)
|
logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message)
|
||||||
status.TrackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message))
|
status.TrackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message))
|
||||||
|
@ -567,7 +579,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
|
|
||||||
if resp := msg.GetResponse(); resp != nil {
|
if resp := msg.GetResponse(); resp != nil {
|
||||||
// TODO(peering): Ensure there's a nonce
|
// TODO(peering): Ensure there's a nonce
|
||||||
reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp, logger)
|
reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, status, resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID)
|
logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID)
|
||||||
status.TrackRecvError(err.Error())
|
status.TrackRecvError(err.Error())
|
||||||
|
@ -575,6 +587,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
status.TrackRecvResourceSuccess()
|
status.TrackRecvResourceSuccess()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We are replying ACK or NACK depending on whether we successfully processed the response.
|
||||||
if err := streamSend(reply); err != nil {
|
if err := streamSend(reply); err != nil {
|
||||||
return fmt.Errorf("failed to send to stream: %v", err)
|
return fmt.Errorf("failed to send to stream: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -605,14 +618,14 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
// They just can't trace the execution properly for some reason (possibly golang/go#29587).
|
// They just can't trace the execution properly for some reason (possibly golang/go#29587).
|
||||||
//nolint:govet
|
//nolint:govet
|
||||||
incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
|
incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
|
||||||
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
|
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
case update := <-subCh:
|
case update := <-subCh:
|
||||||
var resp *pbpeerstream.ReplicationMessage_Response
|
var resp *pbpeerstream.ReplicationMessage_Response
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(update.CorrelationID, subExportedService):
|
case strings.HasPrefix(update.CorrelationID, subExportedService):
|
||||||
resp, err = makeServiceResponse(logger, status, update)
|
resp, err = makeServiceResponse(status, update)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Log the error and skip this response to avoid locking up peering due to a bad update event.
|
// Log the error and skip this response to avoid locking up peering due to a bad update event.
|
||||||
logger.Error("failed to create service response", "error", err)
|
logger.Error("failed to create service response", "error", err)
|
||||||
|
@ -623,13 +636,20 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
// TODO(Peering): figure out how to sync this separately
|
// TODO(Peering): figure out how to sync this separately
|
||||||
|
|
||||||
case update.CorrelationID == subCARoot:
|
case update.CorrelationID == subCARoot:
|
||||||
resp, err = makeCARootsResponse(logger, update)
|
resp, err = makeCARootsResponse(update)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Log the error and skip this response to avoid locking up peering due to a bad update event.
|
// Log the error and skip this response to avoid locking up peering due to a bad update event.
|
||||||
logger.Error("failed to create ca roots response", "error", err)
|
logger.Error("failed to create ca roots response", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case update.CorrelationID == subServerAddrs:
|
||||||
|
resp, err = makeServerAddrsResponse(update)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed to create server address response", "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID)
|
logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID)
|
||||||
continue
|
continue
|
||||||
|
@ -640,9 +660,9 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
||||||
|
|
||||||
replResp := makeReplicationResponse(resp)
|
replResp := makeReplicationResponse(resp)
|
||||||
if err := streamSend(replResp); err != nil {
|
if err := streamSend(replResp); err != nil {
|
||||||
|
// note: govet warns of context leak but it is cleaned up in a defer
|
||||||
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
|
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
|
||||||
}
|
}
|
||||||
status.TrackSendSuccess()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -126,7 +126,7 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) {
|
||||||
|
|
||||||
// Receive a subscription from a peer. This message arrives while the
|
// Receive a subscription from a peer. This message arrives while the
|
||||||
// server is a leader and should work.
|
// server is a leader and should work.
|
||||||
testutil.RunStep(t, "send subscription request to leader and consume its two requests", func(t *testing.T) {
|
testutil.RunStep(t, "send subscription request to leader and consume its three requests", func(t *testing.T) {
|
||||||
sub := &pbpeerstream.ReplicationMessage{
|
sub := &pbpeerstream.ReplicationMessage{
|
||||||
Payload: &pbpeerstream.ReplicationMessage_Open_{
|
Payload: &pbpeerstream.ReplicationMessage_Open_{
|
||||||
Open: &pbpeerstream.ReplicationMessage_Open{
|
Open: &pbpeerstream.ReplicationMessage_Open{
|
||||||
|
@ -145,6 +145,10 @@ func TestStreamResources_Server_LeaderBecomesFollower(t *testing.T) {
|
||||||
msg2, err := client.Recv()
|
msg2, err := client.Recv()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotEmpty(t, msg2)
|
require.NotEmpty(t, msg2)
|
||||||
|
|
||||||
|
msg3, err := client.Recv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotEmpty(t, msg3)
|
||||||
})
|
})
|
||||||
|
|
||||||
// The ACK will be a new request but at this point the server is not the
|
// The ACK will be a new request but at this point the server is not the
|
||||||
|
@ -499,9 +503,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) {
|
||||||
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||||
}
|
}
|
||||||
|
|
||||||
srv, store := newTestServer(t, func(c *Config) {
|
srv, store := newTestServer(t, nil)
|
||||||
c.Tracker.SetClock(it.Now)
|
srv.Tracker.setClock(it.Now)
|
||||||
})
|
|
||||||
|
|
||||||
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
||||||
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
||||||
|
@ -552,9 +555,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||||
}
|
}
|
||||||
|
|
||||||
srv, store := newTestServer(t, func(c *Config) {
|
srv, store := newTestServer(t, nil)
|
||||||
c.Tracker.SetClock(it.Now)
|
srv.Tracker.setClock(it.Now)
|
||||||
})
|
|
||||||
|
|
||||||
// Set the initial roots and CA configuration.
|
// Set the initial roots and CA configuration.
|
||||||
_, rootA := writeInitialRootsAndCA(t, store)
|
_, rootA := writeInitialRootsAndCA(t, store)
|
||||||
|
@ -572,7 +574,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
var lastSendAck, lastSendSuccess time.Time
|
var lastSendAck time.Time
|
||||||
|
|
||||||
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
|
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
|
||||||
ack := &pbpeerstream.ReplicationMessage{
|
ack := &pbpeerstream.ReplicationMessage{
|
||||||
|
@ -587,16 +589,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC)
|
lastSendAck = it.FutureNow(1)
|
||||||
lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC)
|
|
||||||
err := client.Send(ack)
|
err := client.Send(ack)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
expect := Status{
|
expect := Status{
|
||||||
Connected: true,
|
Connected: true,
|
||||||
LastAck: lastSendAck,
|
LastAck: lastSendAck,
|
||||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
|
||||||
LastSendSuccess: lastSendSuccess,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retry.Run(t, func(r *retry.R) {
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
@ -624,8 +623,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC)
|
lastNack = it.FutureNow(1)
|
||||||
lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC)
|
|
||||||
err := client.Send(nack)
|
err := client.Send(nack)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@ -636,8 +634,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
LastAck: lastSendAck,
|
LastAck: lastSendAck,
|
||||||
LastNack: lastNack,
|
LastNack: lastNack,
|
||||||
LastNackMessage: lastNackMsg,
|
LastNackMessage: lastNackMsg,
|
||||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
|
||||||
LastSendSuccess: lastSendSuccess,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retry.Run(t, func(r *retry.R) {
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
@ -707,8 +703,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
ImportedServices: map[string]struct{}{
|
ImportedServices: map[string]struct{}{
|
||||||
api.String(): {},
|
api.String(): {},
|
||||||
},
|
},
|
||||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
|
||||||
LastSendSuccess: lastSendSuccess,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retry.Run(t, func(r *retry.R) {
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
@ -770,8 +764,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
ImportedServices: map[string]struct{}{
|
ImportedServices: map[string]struct{}{
|
||||||
api.String(): {},
|
api.String(): {},
|
||||||
},
|
},
|
||||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
|
||||||
LastSendSuccess: lastSendSuccess,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retry.Run(t, func(r *retry.R) {
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
@ -805,8 +797,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
ImportedServices: map[string]struct{}{
|
ImportedServices: map[string]struct{}{
|
||||||
api.String(): {},
|
api.String(): {},
|
||||||
},
|
},
|
||||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
|
||||||
LastSendSuccess: lastSendSuccess,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retry.Run(t, func(r *retry.R) {
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
@ -839,8 +829,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
||||||
ImportedServices: map[string]struct{}{
|
ImportedServices: map[string]struct{}{
|
||||||
api.String(): {},
|
api.String(): {},
|
||||||
},
|
},
|
||||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
|
||||||
LastSendSuccess: lastSendSuccess,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
retry.Run(t, func(r *retry.R) {
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
@ -1142,9 +1130,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
srv, store := newTestServer(t, func(c *Config) {
|
srv, store := newTestServer(t, func(c *Config) {
|
||||||
c.Tracker.SetClock(it.Now)
|
c.incomingHeartbeatTimeout = 50 * time.Millisecond
|
||||||
c.IncomingHeartbeatTimeout = 5 * time.Millisecond
|
|
||||||
})
|
})
|
||||||
|
srv.Tracker.setClock(it.Now)
|
||||||
|
|
||||||
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
||||||
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
||||||
|
@ -1190,9 +1178,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) {
|
||||||
outgoingHeartbeatInterval := 5 * time.Millisecond
|
outgoingHeartbeatInterval := 5 * time.Millisecond
|
||||||
|
|
||||||
srv, store := newTestServer(t, func(c *Config) {
|
srv, store := newTestServer(t, func(c *Config) {
|
||||||
c.Tracker.SetClock(it.Now)
|
|
||||||
c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
|
c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
|
||||||
})
|
})
|
||||||
|
srv.Tracker.setClock(it.Now)
|
||||||
|
|
||||||
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
||||||
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
||||||
|
@ -1249,9 +1237,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
|
||||||
incomingHeartbeatTimeout := 10 * time.Millisecond
|
incomingHeartbeatTimeout := 10 * time.Millisecond
|
||||||
|
|
||||||
srv, store := newTestServer(t, func(c *Config) {
|
srv, store := newTestServer(t, func(c *Config) {
|
||||||
c.Tracker.SetClock(it.Now)
|
c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
|
||||||
c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout
|
|
||||||
})
|
})
|
||||||
|
srv.Tracker.setClock(it.Now)
|
||||||
|
|
||||||
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
||||||
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
require.Empty(t, p.PeerID, "should be empty if being dialed")
|
||||||
|
@ -1328,7 +1316,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
|
||||||
|
|
||||||
// makeClient sets up a *MockClient with the initial subscription
|
// makeClient sets up a *MockClient with the initial subscription
|
||||||
// message handshake.
|
// message handshake.
|
||||||
func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID string) *MockClient {
|
func makeClient(t *testing.T, srv *testServer, peerID string) *MockClient {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
client := NewMockClient(context.Background())
|
client := NewMockClient(context.Background())
|
||||||
|
@ -1340,7 +1328,7 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s
|
||||||
// Pass errors from server handler into ErrCh so that they can be seen by the client on Recv().
|
// Pass errors from server handler into ErrCh so that they can be seen by the client on Recv().
|
||||||
// This matches gRPC's behavior when an error is returned by a server.
|
// This matches gRPC's behavior when an error is returned by a server.
|
||||||
if err := srv.StreamResources(client.ReplicationStream); err != nil {
|
if err := srv.StreamResources(client.ReplicationStream); err != nil {
|
||||||
errCh <- srv.StreamResources(client.ReplicationStream)
|
errCh <- err
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
@ -1359,11 +1347,19 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
receivedSub2, err := client.Recv()
|
receivedSub2, err := client.Recv()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
receivedSub3, err := client.Recv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Issue a services and roots subscription pair to server
|
// This is required when the client subscribes to server address replication messages.
|
||||||
|
// We assert for the handler to be called at least once but the data doesn't matter.
|
||||||
|
srv.mockSnapshotHandler.expect("", 0, 0, nil)
|
||||||
|
|
||||||
|
// Issue services, roots, and server address subscription to server.
|
||||||
|
// Note that server address may not come as an initial message
|
||||||
for _, resourceURL := range []string{
|
for _, resourceURL := range []string{
|
||||||
pbpeerstream.TypeURLExportedService,
|
pbpeerstream.TypeURLExportedService,
|
||||||
pbpeerstream.TypeURLPeeringTrustBundle,
|
pbpeerstream.TypeURLPeeringTrustBundle,
|
||||||
|
pbpeerstream.TypeURLPeeringServerAddresses,
|
||||||
} {
|
} {
|
||||||
init := &pbpeerstream.ReplicationMessage{
|
init := &pbpeerstream.ReplicationMessage{
|
||||||
Payload: &pbpeerstream.ReplicationMessage_Request_{
|
Payload: &pbpeerstream.ReplicationMessage_Request_{
|
||||||
|
@ -1399,10 +1395,22 @@ func makeClient(t *testing.T, srv pbpeerstream.PeerStreamServiceServer, peerID s
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Payload: &pbpeerstream.ReplicationMessage_Request_{
|
||||||
|
Request: &pbpeerstream.ReplicationMessage_Request{
|
||||||
|
ResourceURL: pbpeerstream.TypeURLPeeringServerAddresses,
|
||||||
|
// The PeerID field is only set for the messages coming FROM
|
||||||
|
// the establishing side and are going to be empty from the
|
||||||
|
// other side.
|
||||||
|
PeerID: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
got := []*pbpeerstream.ReplicationMessage{
|
got := []*pbpeerstream.ReplicationMessage{
|
||||||
receivedSub1,
|
receivedSub1,
|
||||||
receivedSub2,
|
receivedSub2,
|
||||||
|
receivedSub3,
|
||||||
}
|
}
|
||||||
prototest.AssertElementsMatch(t, expect, got)
|
prototest.AssertElementsMatch(t, expect, got)
|
||||||
|
|
||||||
|
@ -1459,6 +1467,10 @@ func (b *testStreamBackend) PeeringSecretsWrite(req *pbpeering.SecretsWriteReque
|
||||||
return b.store.PeeringSecretsWrite(1, req)
|
return b.store.PeeringSecretsWrite(1, req)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *testStreamBackend) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
|
||||||
|
return b.store.PeeringWrite(1, req)
|
||||||
|
}
|
||||||
|
|
||||||
// CatalogRegister mocks catalog registrations through Raft by copying the logic of FSM.applyRegister.
|
// CatalogRegister mocks catalog registrations through Raft by copying the logic of FSM.applyRegister.
|
||||||
func (b *testStreamBackend) CatalogRegister(req *structs.RegisterRequest) error {
|
func (b *testStreamBackend) CatalogRegister(req *structs.RegisterRequest) error {
|
||||||
return b.store.EnsureRegistration(1, req)
|
return b.store.EnsureRegistration(1, req)
|
||||||
|
@ -1512,7 +1524,7 @@ func Test_makeServiceResponse_ExportedServicesCount(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
_, err := makeServiceResponse(srv.Logger, mst, update)
|
_, err := makeServiceResponse(mst, update)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, mst.GetExportedServicesCount())
|
require.Equal(t, 1, mst.GetExportedServicesCount())
|
||||||
|
@ -1524,7 +1536,7 @@ func Test_makeServiceResponse_ExportedServicesCount(t *testing.T) {
|
||||||
Result: &pbservice.IndexedCheckServiceNodes{
|
Result: &pbservice.IndexedCheckServiceNodes{
|
||||||
Nodes: []*pbservice.CheckServiceNode{},
|
Nodes: []*pbservice.CheckServiceNode{},
|
||||||
}}
|
}}
|
||||||
_, err := makeServiceResponse(srv.Logger, mst, update)
|
_, err := makeServiceResponse(mst, update)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 0, mst.GetExportedServicesCount())
|
require.Equal(t, 0, mst.GetExportedServicesCount())
|
||||||
|
@ -1555,7 +1567,7 @@ func Test_processResponse_Validation(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
run := func(t *testing.T, tc testCase) {
|
run := func(t *testing.T, tc testCase) {
|
||||||
reply, err := srv.processResponse(peerName, "", mst, tc.in, srv.Logger)
|
reply, err := srv.processResponse(peerName, "", mst, tc.in)
|
||||||
if tc.wantErr {
|
if tc.wantErr {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
} else {
|
} else {
|
||||||
|
@ -1881,7 +1893,7 @@ func Test_processResponse_handleUpsert_handleDelete(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Simulate an update arriving for billing/api.
|
// Simulate an update arriving for billing/api.
|
||||||
_, err = srv.processResponse(peerName, acl.DefaultPartitionName, mst, in, srv.Logger)
|
_, err = srv.processResponse(peerName, acl.DefaultPartitionName, mst, in)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for svc, expect := range tc.expect {
|
for svc, expect := range tc.expect {
|
||||||
|
@ -2747,11 +2759,16 @@ func requireEqualInstances(t *testing.T, expect, got structs.CheckServiceNodes)
|
||||||
|
|
||||||
type testServer struct {
|
type testServer struct {
|
||||||
*Server
|
*Server
|
||||||
|
|
||||||
|
// mockSnapshotHandler is solely used for handling autopilot events
|
||||||
|
// which don't come from the state store.
|
||||||
|
mockSnapshotHandler *mockSnapshotHandler
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.Store) {
|
func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.Store) {
|
||||||
|
t.Helper()
|
||||||
publisher := stream.NewEventPublisher(10 * time.Second)
|
publisher := stream.NewEventPublisher(10 * time.Second)
|
||||||
store := newStateStore(t, publisher)
|
store, handler := newStateStore(t, publisher)
|
||||||
|
|
||||||
ports := freeport.GetN(t, 1) // {grpc}
|
ports := freeport.GetN(t, 1) // {grpc}
|
||||||
|
|
||||||
|
@ -2760,7 +2777,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
|
||||||
store: store,
|
store: store,
|
||||||
pub: publisher,
|
pub: publisher,
|
||||||
},
|
},
|
||||||
Tracker: NewTracker(),
|
|
||||||
GetStore: func() StateStore { return store },
|
GetStore: func() StateStore { return store },
|
||||||
Logger: testutil.Logger(t),
|
Logger: testutil.Logger(t),
|
||||||
Datacenter: "dc1",
|
Datacenter: "dc1",
|
||||||
|
@ -2789,6 +2805,7 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
|
||||||
|
|
||||||
return &testServer{
|
return &testServer{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
|
mockSnapshotHandler: handler,
|
||||||
}, store
|
}, store
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -14,20 +14,27 @@ type Tracker struct {
 	mu      sync.RWMutex
 	streams map[string]*MutableStatus
 
+	// heartbeatTimeout is the max duration a connection is allowed to be
+	// disconnected before the stream health is reported as non-healthy
+	heartbeatTimeout time.Duration
+
 	// timeNow is a shim for testing.
 	timeNow func() time.Time
-
-	heartbeatTimeout time.Duration
 }
 
-func NewTracker() *Tracker {
+func NewTracker(heartbeatTimeout time.Duration) *Tracker {
+	if heartbeatTimeout == 0 {
+		heartbeatTimeout = defaultIncomingHeartbeatTimeout
+	}
 	return &Tracker{
 		streams: make(map[string]*MutableStatus),
 		timeNow: time.Now,
+		heartbeatTimeout: heartbeatTimeout,
 	}
 }
 
-func (t *Tracker) SetClock(clock func() time.Time) {
+// setClock is used for debugging purposes only.
+func (t *Tracker) setClock(clock func() time.Time) {
 	if clock == nil {
 		t.timeNow = time.Now
 	} else {
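For reference, the constructor change above means callers now pick the heartbeat timeout up front, and a zero value falls back to the package default. Below is a minimal, self-contained sketch of that zero-value-fallback pattern; the names (`defaultTimeout`, `tracker`) are stand-ins for illustration, not the actual peerstream identifiers.

```go
package main

import (
	"fmt"
	"time"
)

// defaultTimeout stands in for defaultIncomingHeartbeatTimeout; the real value
// is defined elsewhere in the package this diff touches.
const defaultTimeout = 2 * time.Minute

// tracker is a trimmed-down stand-in for the Tracker in this diff: the
// constructor takes the timeout and substitutes a default when the caller
// passes the zero value.
type tracker struct {
	heartbeatTimeout time.Duration
}

func newTracker(heartbeatTimeout time.Duration) *tracker {
	if heartbeatTimeout == 0 {
		heartbeatTimeout = defaultTimeout
	}
	return &tracker{heartbeatTimeout: heartbeatTimeout}
}

func main() {
	fmt.Println(newTracker(0).heartbeatTimeout)               // falls back to the default
	fmt.Println(newTracker(5 * time.Second).heartbeatTimeout) // explicit override
}
```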
@@ -35,12 +42,6 @@ func (t *Tracker) SetClock(clock func() time.Time) {
 	}
 }
 
-func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	t.heartbeatTimeout = heartbeatTimeout
-}
-
 // Register a stream for a given peer but do not mark it as connected.
 func (t *Tracker) Register(id string) (*MutableStatus, error) {
 	t.mu.Lock()
@@ -52,7 +53,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) {
 func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) {
 	status, ok := t.streams[id]
 	if !ok {
-		status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected)
+		status = newMutableStatus(t.timeNow, initAsConnected)
 		t.streams[id] = status
 		return status, true, nil
 	}
@@ -136,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) {
 	delete(t.streams, id)
 }
 
+// IsHealthy calculates the health of a peering status.
+// We define a peering as unhealthy if its status has been in the following
+// states for longer than the configured incomingHeartbeatTimeout.
+//  - If it is disconnected
+//  - If the last received Nack is newer than last received Ack
+//  - If the last received error is newer than last received success
+//
+// If none of these conditions apply, we call the peering healthy.
+func (t *Tracker) IsHealthy(s Status) bool {
+	// If stream is in a disconnected state for longer than the configured
+	// heartbeat timeout, report as unhealthy.
+	if !s.DisconnectTime.IsZero() &&
+		t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout {
+		return false
+	}
+
+	// If last Nack is after last Ack, it means the peer is unable to
+	// handle our replication message.
+	if s.LastNack.After(s.LastAck) &&
+		t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout {
+		return false
+	}
+
+	// If last recv error is newer than last recv success, we were unable
+	// to handle the peer's replication message.
+	if s.LastRecvError.After(s.LastRecvResourceSuccess) &&
+		t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout {
+		return false
+	}
+
+	return true
+}
+
 type MutableStatus struct {
 	mu sync.RWMutex
 
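The new Tracker.IsHealthy only reports a peering as unhealthy once a failure signal (a disconnect, a trailing Nack, or a trailing receive error) has persisted beyond the configured heartbeat timeout. Below is a hedged, standalone sketch of that "signal plus grace period" rule using stand-in types rather than the real Status struct.

```go
package main

import (
	"fmt"
	"time"
)

// status is a stand-in for the Status struct in this diff; only the fields
// needed for the health rules are modelled.
type status struct {
	DisconnectTime          time.Time
	LastAck, LastNack       time.Time
	LastRecvResourceSuccess time.Time
	LastRecvError           time.Time
}

// isHealthy mirrors the rules described above: each failure signal only
// counts once it has persisted longer than the timeout.
func isHealthy(s status, now time.Time, timeout time.Duration) bool {
	if !s.DisconnectTime.IsZero() && now.Sub(s.DisconnectTime) > timeout {
		return false
	}
	if s.LastNack.After(s.LastAck) && now.Sub(s.LastAck) > timeout {
		return false
	}
	if s.LastRecvError.After(s.LastRecvResourceSuccess) && now.Sub(s.LastRecvError) > timeout {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	recentlyDisconnected := status{DisconnectTime: now.Add(-30 * time.Second)}
	longDisconnected := status{DisconnectTime: now.Add(-10 * time.Minute)}
	fmt.Println(isHealthy(recentlyDisconnected, now, 2*time.Minute)) // true: still within the grace period
	fmt.Println(isHealthy(longDisconnected, now, 2*time.Minute))     // false: disconnected past the timeout
}
```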
@@ -152,8 +186,6 @@ type MutableStatus struct {
 // Status contains information about the replication stream to a peer cluster.
 // TODO(peering): There's a lot of fields here...
 type Status struct {
-	heartbeatTimeout time.Duration
-
 	// Connected is true when there is an open stream for the peer.
 	Connected bool
 
@@ -182,9 +214,6 @@ type Status struct {
 	// LastSendErrorMessage tracks the last error message when sending into the stream.
 	LastSendErrorMessage string
 
-	// LastSendSuccess tracks the time of the last success response sent into the stream.
-	LastSendSuccess time.Time
-
 	// LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
 	LastRecvHeartbeat time.Time
 
@@ -214,39 +243,10 @@ func (s *Status) GetExportedServicesCount() uint64 {
 	return uint64(len(s.ExportedServices))
 }
 
-// IsHealthy is a convenience func that returns true/ false for a peering status.
-// We define a peering as unhealthy if its status satisfies one of the following:
-// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout
-// - If the last sent error is newer than last sent success
-// - If the last received error is newer than last received success
-// If none of these conditions apply, we call the peering healthy.
-func (s *Status) IsHealthy() bool {
-	if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout {
-		// 1. If heartbeat hasn't been received for a while - report unhealthy
-		return false
-	}
-
-	if s.LastSendError.After(s.LastSendSuccess) {
-		// 2. If last sent error is newer than last sent success - report unhealthy
-		return false
-	}
-
-	if s.LastRecvError.After(s.LastRecvResourceSuccess) {
-		// 3. If last recv error is newer than last recv success - report unhealthy
-		return false
-	}
-
-	return true
-}
-
-func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus {
-	if heartbeatTimeout.Microseconds() == 0 {
-		heartbeatTimeout = defaultIncomingHeartbeatTimeout
-	}
+func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
 	return &MutableStatus{
 		Status: Status{
 			Connected: connected,
-			heartbeatTimeout: heartbeatTimeout,
 			NeverConnected: !connected,
 		},
 		timeNow: now,
@@ -271,12 +271,6 @@ func (s *MutableStatus) TrackSendError(error string) {
 	s.mu.Unlock()
 }
 
-func (s *MutableStatus) TrackSendSuccess() {
-	s.mu.Lock()
-	s.LastSendSuccess = s.timeNow().UTC()
-	s.mu.Unlock()
-}
-
 // TrackRecvResourceSuccess tracks receiving a replicated resource.
 func (s *MutableStatus) TrackRecvResourceSuccess() {
 	s.mu.Lock()
@@ -5,6 +5,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/hashicorp/consul/sdk/testutil"
@@ -14,74 +15,92 @@ const (
 	aPeerID = "63b60245-c475-426b-b314-4588d210859d"
 )
 
-func TestStatus_IsHealthy(t *testing.T) {
+func TestTracker_IsHealthy(t *testing.T) {
 	type testcase struct {
 		name         string
-		dontConnect  bool
+		tracker      *Tracker
 		modifierFunc func(status *MutableStatus)
 		expectedVal  bool
-		heartbeatTimeout time.Duration
 	}
 
 	tcs := []testcase{
 		{
-			name:        "never connected, unhealthy",
-			expectedVal: false,
-			dontConnect: true,
+			name:        "disconnect time within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				status.DisconnectTime = time.Now()
+			},
 		},
 		{
-			name:        "no heartbeat, unhealthy",
-			expectedVal: false,
-		},
-		{
-			name:        "heartbeat is not received, unhealthy",
+			name:        "disconnect time past timeout",
+			tracker:     NewTracker(1 * time.Millisecond),
 			expectedVal: false,
 			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second)
-			},
-			heartbeatTimeout: 1 * time.Second,
-		},
-		{
-			name:        "send error before send success",
-			expectedVal: false,
-			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now()
-
-				status.LastSendSuccess = time.Now()
-				status.LastSendError = time.Now()
+				status.DisconnectTime = time.Now().Add(-1 * time.Minute)
 			},
 		},
 		{
-			name:        "received error before received success",
+			name:        "receive error before receive success within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now()
+				status.LastRecvResourceSuccess = now
+				status.LastRecvError = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "receive error before receive success within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now()
+				status.LastRecvResourceSuccess = now
+				status.LastRecvError = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "receive error before receive success past timeout",
+			tracker:     NewTracker(1 * time.Millisecond),
 			expectedVal: false,
 			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now()
-
-				status.LastRecvResourceSuccess = time.Now()
-				status.LastRecvError = time.Now()
+				now := time.Now().Add(-2 * time.Second)
+				status.LastRecvResourceSuccess = now
+				status.LastRecvError = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "nack before ack within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now()
+				status.LastAck = now
+				status.LastNack = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "nack before ack past timeout",
+			tracker:     NewTracker(1 * time.Millisecond),
+			expectedVal: false,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now().Add(-2 * time.Second)
+				status.LastAck = now
+				status.LastNack = now.Add(1 * time.Second)
 			},
 		},
 		{
 			name:        "healthy",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
 			expectedVal: true,
-			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now()
-			},
 		},
 	}
 
 	for _, tc := range tcs {
 		t.Run(tc.name, func(t *testing.T) {
-			tracker := NewTracker()
-			if tc.heartbeatTimeout.Microseconds() != 0 {
-				tracker.SetHeartbeatTimeout(tc.heartbeatTimeout)
-			}
-
-			if !tc.dontConnect {
-				st, err := tracker.Connected(aPeerID)
-				require.NoError(t, err)
-				require.True(t, st.Connected)
+			tracker := tc.tracker
+
+			st, err := tracker.Connected(aPeerID)
+			require.NoError(t, err)
+			require.True(t, st.Connected)
@@ -90,19 +109,13 @@ func TestStatus_IsHealthy(t *testing.T) {
 				tc.modifierFunc(st)
 			}
 
-				require.Equal(t, tc.expectedVal, st.IsHealthy())
-
-			} else {
-				st, found := tracker.StreamStatus(aPeerID)
-				require.False(t, found)
-				require.Equal(t, tc.expectedVal, st.IsHealthy())
-			}
+			assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus()))
 		})
 	}
 }
 
 func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
-	tracker := NewTracker()
+	tracker := NewTracker(defaultIncomingHeartbeatTimeout)
 	peerID := "63b60245-c475-426b-b314-4588d210859d"
 
 	it := incrementalTime{
@@ -121,7 +134,6 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 
 		expect := Status{
 			Connected: true,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 
 		status, ok := tracker.StreamStatus(peerID)
@@ -149,7 +161,6 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 		expect := Status{
 			Connected: true,
 			LastAck:   lastSuccess,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 		require.Equal(t, expect, status)
 	})
@@ -162,7 +173,6 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 			Connected:      false,
 			DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
 			LastAck:        lastSuccess,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 		status, ok := tracker.StreamStatus(peerID)
 		require.True(t, ok)
@@ -176,7 +186,6 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 		expect := Status{
 			Connected: true,
 			LastAck:   lastSuccess,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 
 			// DisconnectTime gets cleared on re-connect.
 		}
@@ -203,7 +212,7 @@ func TestTracker_connectedStreams(t *testing.T) {
 	}
 
 	run := func(t *testing.T, tc testCase) {
-		tracker := NewTracker()
+		tracker := NewTracker(defaultIncomingHeartbeatTimeout)
 		if tc.setup != nil {
 			tc.setup(t, tracker)
 		}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"strconv"
 	"strings"
 
 	"github.com/golang/protobuf/proto"
@@ -12,6 +13,7 @@ import (
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/cache"
 	"github.com/hashicorp/consul/agent/connect"
+	"github.com/hashicorp/consul/agent/consul/autopilotevents"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/consul/stream"
 	"github.com/hashicorp/consul/agent/structs"
@@ -42,6 +44,7 @@ type subscriptionManager struct {
 	getStore             func() StateStore
 	serviceSubReady      <-chan struct{}
 	trustBundlesSubReady <-chan struct{}
+	serverAddrsSubReady  <-chan struct{}
 }
 
 // TODO(peering): Maybe centralize so that there is a single manager per datacenter, rather than per peering.
@@ -67,6 +70,7 @@ func newSubscriptionManager(
 		getStore:             getStore,
 		serviceSubReady:      remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLExportedService),
 		trustBundlesSubReady: remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLPeeringTrustBundle),
+		serverAddrsSubReady:  remoteSubTracker.SubscribedChan(pbpeerstream.TypeURLPeeringServerAddresses),
 	}
 }
 
@@ -83,6 +87,7 @@ func (m *subscriptionManager) subscribe(ctx context.Context, peerID, peerName, p
 
 	// Wrap our bare state store queries in goroutines that emit events.
 	go m.notifyExportedServicesForPeerID(ctx, state, peerID)
+	go m.notifyServerAddrUpdates(ctx, state.updateCh)
 	if m.config.ConnectEnabled {
 		go m.notifyMeshGatewaysForPartition(ctx, state, state.partition)
 		// If connect is enabled, watch for updates to CA roots.
@@ -262,6 +267,17 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
 
 		state.sendPendingEvents(ctx, m.logger, pending)
 
+	case u.CorrelationID == subServerAddrs:
+		addrs, ok := u.Result.(*pbpeering.PeeringServerAddresses)
+		if !ok {
+			return fmt.Errorf("invalid type for response: %T", u.Result)
+		}
+		pending := &pendingPayload{}
+		if err := pending.Add(serverAddrsPayloadID, u.CorrelationID, addrs); err != nil {
+			return err
+		}
+
+		state.sendPendingEvents(ctx, m.logger, pending)
 	default:
 		return fmt.Errorf("unknown correlation ID: %s", u.CorrelationID)
 	}
@@ -333,6 +349,8 @@ func (m *subscriptionManager) notifyRootCAUpdatesForPartition(
 	}
 }
 
+const subCARoot = "roots"
+
 // subscribeCARoots subscribes to state.EventTopicCARoots for changes to CA roots.
 // Upon receiving an event it will send the payload in updateCh.
 func (m *subscriptionManager) subscribeCARoots(
@@ -414,8 +432,6 @@ func (m *subscriptionManager) subscribeCARoots(
 	}
 }
 
-const subCARoot = "roots"
-
 func (m *subscriptionManager) syncNormalServices(
 	ctx context.Context,
 	state *subscriptionState,
@@ -721,3 +737,112 @@ const syntheticProxyNameSuffix = "-sidecar-proxy"
 func generateProxyNameForDiscoveryChain(sn structs.ServiceName) structs.ServiceName {
 	return structs.NewServiceName(sn.Name+syntheticProxyNameSuffix, &sn.EnterpriseMeta)
 }
+
+const subServerAddrs = "server-addrs"
+
+func (m *subscriptionManager) notifyServerAddrUpdates(
+	ctx context.Context,
+	updateCh chan<- cache.UpdateEvent,
+) {
+	// Wait until this is subscribed-to.
+	select {
+	case <-m.serverAddrsSubReady:
+	case <-ctx.Done():
+		return
+	}
+
+	var idx uint64
+	// TODO(peering): retry logic; fail past a threshold
+	for {
+		var err error
+		// Typically, this function will block inside `m.subscribeServerAddrs` and only return on error.
+		// Errors are logged and the watch is retried.
+		idx, err = m.subscribeServerAddrs(ctx, idx, updateCh)
+		if errors.Is(err, stream.ErrSubForceClosed) {
+			m.logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt resume")
+		} else if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+			m.logger.Warn("failed to subscribe to server addresses, will attempt resume", "error", err.Error())
+		} else {
+			m.logger.Trace(err.Error())
+		}
+
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+	}
+}
+
+func (m *subscriptionManager) subscribeServerAddrs(
+	ctx context.Context,
+	idx uint64,
+	updateCh chan<- cache.UpdateEvent,
+) (uint64, error) {
+	// following code adapted from serverdiscovery/watch_servers.go
+	sub, err := m.backend.Subscribe(&stream.SubscribeRequest{
+		Topic:   autopilotevents.EventTopicReadyServers,
+		Subject: stream.SubjectNone,
+		Token:   "", // using anonymous token for now
+		Index:   idx,
+	})
+	if err != nil {
+		return 0, fmt.Errorf("failed to subscribe to ReadyServers events: %w", err)
+	}
+	defer sub.Unsubscribe()
+
+	for {
+		event, err := sub.Next(ctx)
+		switch {
+		case errors.Is(err, context.Canceled):
+			return 0, err
+		case err != nil:
+			return idx, err
+		}
+
+		// We do not send framing events (e.g. EndOfSnapshot, NewSnapshotToFollow)
+		// because we send a full list of ready servers on every event, rather than expecting
+		// clients to maintain a state-machine in the way they do for service health.
+		if event.IsFramingEvent() {
+			continue
+		}
+
+		// Note: this check isn't strictly necessary because the event publishing
+		// machinery will ensure the index increases monotonically, but it can be
+		// tricky to faithfully reproduce this in tests (e.g. the EventPublisher
+		// garbage collects topic buffers and snapshots aggressively when streams
+		// disconnect) so this avoids a bunch of confusing setup code.
+		if event.Index <= idx {
+			continue
+		}
+
+		idx = event.Index
+
+		payload, ok := event.Payload.(autopilotevents.EventPayloadReadyServers)
+		if !ok {
+			return 0, fmt.Errorf("unexpected event payload type: %T", payload)
+		}
+
+		var serverAddrs = make([]string, 0, len(payload))
+
+		for _, srv := range payload {
+			if srv.ExtGRPCPort == 0 {
+				continue
+			}
+			grpcAddr := srv.Address + ":" + strconv.Itoa(srv.ExtGRPCPort)
+			serverAddrs = append(serverAddrs, grpcAddr)
+		}
+
+		if len(serverAddrs) == 0 {
+			m.logger.Warn("did not find any server addresses with external gRPC ports to publish")
+			continue
+		}
+
+		updateCh <- cache.UpdateEvent{
+			CorrelationID: subServerAddrs,
+			Result: &pbpeering.PeeringServerAddresses{
+				Addresses: serverAddrs,
+			},
+		}
+	}
+}
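subscribeServerAddrs publishes the ready-server payload as plain "host:port" strings and drops any server that exposes no external gRPC port. Below is a small standalone sketch of just that filtering step; serverInfo is a stand-in for autopilotevents.ReadyServerInfo, not the real type.

```go
package main

import (
	"fmt"
	"strconv"
)

// serverInfo is a stand-in for autopilotevents.ReadyServerInfo; only the two
// fields used when building peering addresses are modelled here.
type serverInfo struct {
	Address     string
	ExtGRPCPort int
}

// grpcAddrs mirrors the loop in the diff above: servers without an external
// gRPC port are skipped, everything else becomes "address:port".
func grpcAddrs(servers []serverInfo) []string {
	addrs := make([]string, 0, len(servers))
	for _, srv := range servers {
		if srv.ExtGRPCPort == 0 {
			continue
		}
		addrs = append(addrs, srv.Address+":"+strconv.Itoa(srv.ExtGRPCPort))
	}
	return addrs
}

func main() {
	servers := []serverInfo{
		{Address: "198.18.0.1", ExtGRPCPort: 8502},
		{Address: "198.18.0.2"}, // no external gRPC port: dropped
	}
	fmt.Println(grpcAddrs(servers)) // [198.18.0.1:8502]
}
```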
@@ -3,14 +3,17 @@ package peerstream
 import (
 	"context"
 	"sort"
+	"sync"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/cache"
 	"github.com/hashicorp/consul/agent/connect"
+	"github.com/hashicorp/consul/agent/consul/autopilotevents"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/consul/stream"
 	"github.com/hashicorp/consul/agent/structs"
@@ -627,20 +630,100 @@ func TestSubscriptionManager_CARoots(t *testing.T) {
 	})
 }
 
+func TestSubscriptionManager_ServerAddrs(t *testing.T) {
+	backend := newTestSubscriptionBackend(t)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	// Create a peering
+	_, id := backend.ensurePeering(t, "my-peering")
+	partition := acl.DefaultEnterpriseMeta().PartitionOrEmpty()
+
+	payload := autopilotevents.EventPayloadReadyServers{
+		autopilotevents.ReadyServerInfo{
+			ID:          "9aeb73f6-e83e-43c1-bdc9-ca5e43efe3e4",
+			Address:     "198.18.0.1",
+			Version:     "1.13.1",
+			ExtGRPCPort: 8502,
+		},
+	}
+	// mock handler only gets called once during the initial subscription
+	backend.handler.expect("", 0, 1, payload)
+
+	// Only configure a tracker for server address events.
+	tracker := newResourceSubscriptionTracker()
+	tracker.Subscribe(pbpeerstream.TypeURLPeeringServerAddresses)
+
+	mgr := newSubscriptionManager(ctx,
+		testutil.Logger(t),
+		Config{
+			Datacenter:     "dc1",
+			ConnectEnabled: true,
+		},
+		connect.TestTrustDomain,
+		backend,
+		func() StateStore {
+			return backend.store
+		},
+		tracker)
+	subCh := mgr.subscribe(ctx, id, "my-peering", partition)
+
+	testutil.RunStep(t, "initial events", func(t *testing.T) {
+		expectEvents(t, subCh,
+			func(t *testing.T, got cache.UpdateEvent) {
+				require.Equal(t, subServerAddrs, got.CorrelationID)
+				addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
+				require.True(t, ok)
+
+				require.Equal(t, []string{"198.18.0.1:8502"}, addrs.GetAddresses())
+			},
+		)
+	})
+
+	testutil.RunStep(t, "added server", func(t *testing.T) {
+		payload = append(payload, autopilotevents.ReadyServerInfo{
+			ID:          "eec8721f-c42b-48da-a5a5-07565158015e",
+			Address:     "198.18.0.2",
+			Version:     "1.13.1",
+			ExtGRPCPort: 9502,
+		})
+		backend.Publish([]stream.Event{
+			{
+				Topic:   autopilotevents.EventTopicReadyServers,
+				Index:   2,
+				Payload: payload,
+			},
+		})
+
+		expectEvents(t, subCh,
+			func(t *testing.T, got cache.UpdateEvent) {
+				require.Equal(t, subServerAddrs, got.CorrelationID)
+				addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
+				require.True(t, ok)
+
+				require.Equal(t, []string{"198.18.0.1:8502", "198.18.0.2:9502"}, addrs.GetAddresses())
			},
+		)
+	})
+}
+
 type testSubscriptionBackend struct {
 	state.EventPublisher
 	store   *state.Store
+	handler *mockSnapshotHandler
+
 	lastIdx uint64
 }
 
 func newTestSubscriptionBackend(t *testing.T) *testSubscriptionBackend {
 	publisher := stream.NewEventPublisher(10 * time.Second)
-	store := newStateStore(t, publisher)
+	store, handler := newStateStore(t, publisher)
 
 	backend := &testSubscriptionBackend{
 		EventPublisher: publisher,
 		store:          store,
+		handler:        handler,
 	}
 
 	backend.ensureCAConfig(t, &structs.CAConfiguration{
@@ -739,20 +822,35 @@ func setupTestPeering(t *testing.T, store *state.Store, name string, index uint6
 	return p.ID
 }
 
-func newStateStore(t *testing.T, publisher *stream.EventPublisher) *state.Store {
-	ctx, cancel := context.WithCancel(context.Background())
-	t.Cleanup(cancel)
-
+func newStateStore(t *testing.T, publisher *stream.EventPublisher) (*state.Store, *mockSnapshotHandler) {
 	gc, err := state.NewTombstoneGC(time.Second, time.Millisecond)
 	require.NoError(t, err)
 
+	handler := newMockSnapshotHandler(t)
+
 	store := state.NewStateStoreWithEventPublisher(gc, publisher)
 	require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealth, store.ServiceHealthSnapshot, false))
 	require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealthConnect, store.ServiceHealthSnapshot, false))
 	require.NoError(t, publisher.RegisterHandler(state.EventTopicCARoots, store.CARootsSnapshot, false))
-	go publisher.Run(ctx)
+	require.NoError(t, publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, handler.handle, false))
 
-	return store
+	// WaitGroup used to make sure that the publisher returns
+	// before handler's t.Cleanup is called (otherwise an event
+	// might fire during an assertion and cause a data race).
+	var wg sync.WaitGroup
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(func() {
+		cancel()
+		wg.Wait()
+	})
+
+	wg.Add(1)
+	go func() {
+		publisher.Run(ctx)
+		wg.Done()
+	}()
+
+	return store, handler
 }
 
 func expectEvents(
@@ -870,3 +968,39 @@ func pbCheck(node, svcID, svcName, status string, entMeta *pbcommon.EnterpriseMe
 		EnterpriseMeta: entMeta,
 	}
 }
+
+// mockSnapshotHandler is copied from server_discovery/server_test.go
+type mockSnapshotHandler struct {
+	mock.Mock
+}
+
+func newMockSnapshotHandler(t *testing.T) *mockSnapshotHandler {
+	handler := &mockSnapshotHandler{}
+	t.Cleanup(func() {
+		handler.AssertExpectations(t)
+	})
+	return handler
+}
+
+func (m *mockSnapshotHandler) handle(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+	ret := m.Called(req, buf)
+	return ret.Get(0).(uint64), ret.Error(1)
+}
+
+func (m *mockSnapshotHandler) expect(token string, requestIndex uint64, eventIndex uint64, payload autopilotevents.EventPayloadReadyServers) {
+	m.On("handle", stream.SubscribeRequest{
+		Topic:   autopilotevents.EventTopicReadyServers,
+		Subject: stream.SubjectNone,
+		Token:   token,
+		Index:   requestIndex,
+	}, mock.Anything).Run(func(args mock.Arguments) {
+		buf := args.Get(1).(stream.SnapshotAppender)
+		buf.Append([]stream.Event{
+			{
+				Topic:   autopilotevents.EventTopicReadyServers,
+				Index:   eventIndex,
+				Payload: payload,
+			},
+		})
+	}).Return(eventIndex, nil)
+}
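The reworked newStateStore pairs the publisher goroutine with a WaitGroup so that cancellation fully completes before the mock handler's cleanup assertions run. Below is a generic sketch of that shutdown ordering; the names are illustrative, not the test helpers above.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// run stands in for publisher.Run: it does periodic work until the context is
// cancelled.
func run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(10 * time.Millisecond):
			// publish events...
		}
	}
}

func main() {
	var wg sync.WaitGroup
	ctx, cancel := context.WithCancel(context.Background())

	wg.Add(1)
	go func() {
		defer wg.Done()
		run(ctx)
	}()

	// Shutdown mirrors the t.Cleanup in the diff: cancel first, then wait, so
	// no event can fire while later cleanup (e.g. mock assertions) runs.
	cancel()
	wg.Wait()
	fmt.Println("publisher stopped before cleanup continues")
}
```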
@@ -93,6 +93,9 @@ func (s *subscriptionState) cleanupEventVersions(logger hclog.Logger) {
 		case id == caRootsPayloadID:
 			keep = true
 
+		case id == serverAddrsPayloadID:
+			keep = true
+
 		case strings.HasPrefix(id, servicePayloadIDPrefix):
 			name := strings.TrimPrefix(id, servicePayloadIDPrefix)
 			sn := structs.ServiceNameFromString(name)
@@ -129,6 +132,7 @@ type pendingEvent struct {
 }
 
 const (
+	serverAddrsPayloadID   = "server-addrs"
 	caRootsPayloadID       = "roots"
 	meshGatewayPayloadID   = "mesh-gateway"
 	servicePayloadIDPrefix = "service:"
@@ -81,6 +81,10 @@ type HTTPHandlers struct {
 	configReloaders []ConfigReloader
 	h               http.Handler
 	metricsProxyCfg atomic.Value
+
+	// proxyTransport is used by UIMetricsProxy to keep
+	// a managed pool of connections.
+	proxyTransport http.RoundTripper
 }
 
 // endpoint is a Consul-specific HTTP handler that takes the usual arguments in
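Keeping a single http.RoundTripper on the handler lets the UI metrics proxy reuse pooled keep-alive connections instead of dialing for every proxied request. A rough sketch of sharing one Transport across requests follows; the pool sizes and idle timeout are illustrative assumptions, not values taken from this change.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// newSharedTransport builds one Transport to be stored on a handler struct
// (as proxyTransport is in this diff) and reused for every proxied request,
// so idle connections can be kept alive and pooled. The numbers below are
// assumptions for the sketch only.
func newSharedTransport() http.RoundTripper {
	return &http.Transport{
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 10,
		IdleConnTimeout:     90 * time.Second,
	}
}

func main() {
	client := &http.Client{Transport: newSharedTransport(), Timeout: 5 * time.Second}
	// Every request made through this client shares the same connection pool.
	fmt.Printf("%T\n", client.Transport)
}
```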
@@ -4,11 +4,9 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/hashicorp/go-hclog"
-
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/cache"
-	"github.com/hashicorp/consul/agent/consul/stream"
+	cachetype "github.com/hashicorp/consul/agent/cache-types"
 	"github.com/hashicorp/consul/agent/proxycfg"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/agent/submatview"
@@ -17,15 +15,16 @@ import (
 	"github.com/hashicorp/consul/proto/pbsubscribe"
 )
 
-// ServerDataSourceDeps contains the dependencies needed for sourcing data from
-// server-local sources (e.g. materialized views).
-type ServerDataSourceDeps struct {
-	Datacenter     string
-	ViewStore      *submatview.Store
-	EventPublisher *stream.EventPublisher
-	Logger         hclog.Logger
-	ACLResolver    submatview.ACLResolver
-	GetStore       func() Store
+// CacheConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing
+// data from the agent cache.
+func CacheConfigEntry(c *cache.Cache) proxycfg.ConfigEntry {
+	return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryName}
+}
+
+// CacheConfigEntryList satisfies the proxycfg.ConfigEntryList interface by
+// sourcing data from the agent cache.
+func CacheConfigEntryList(c *cache.Cache) proxycfg.ConfigEntryList {
+	return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryListName}
 }
 
 // ServerConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing
@@ -3,20 +3,35 @@ package proxycfgglue
 import (
 	"context"
 
-	"github.com/hashicorp/consul/proto/pbpeering"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 
+	"github.com/hashicorp/consul/proto/pbpeering"
+
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/cache"
 	cachetype "github.com/hashicorp/consul/agent/cache-types"
 	"github.com/hashicorp/consul/agent/configentry"
 	"github.com/hashicorp/consul/agent/consul/discoverychain"
 	"github.com/hashicorp/consul/agent/consul/state"
+	"github.com/hashicorp/consul/agent/consul/stream"
 	"github.com/hashicorp/consul/agent/consul/watch"
 	"github.com/hashicorp/consul/agent/proxycfg"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/submatview"
 )
 
+// ServerDataSourceDeps contains the dependencies needed for sourcing data from
+// server-local sources (e.g. materialized views).
+type ServerDataSourceDeps struct {
+	Datacenter     string
+	ViewStore      *submatview.Store
+	EventPublisher *stream.EventPublisher
+	Logger         hclog.Logger
+	ACLResolver    submatview.ACLResolver
+	GetStore       func() Store
+}
+
 // Store is the state store interface required for server-local data sources.
 type Store interface {
 	watch.StateStore
@@ -25,7 +40,9 @@ type Store interface {
 	FederationStateList(ws memdb.WatchSet) (uint64, []*structs.FederationState, error)
 	GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error)
 	IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error)
+	ReadResolvedServiceConfigEntries(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, upstreamIDs []structs.ServiceID, proxyMode structs.ProxyMode) (uint64, *configentry.ResolvedServiceConfigSet, error)
 	ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error)
+	ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error)
 	PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error)
 	PeeringTrustBundleList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
 	TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
@@ -34,24 +51,18 @@ type Store interface {
 
 // CacheCARoots satisfies the proxycfg.CARoots interface by sourcing data from
 // the agent cache.
+//
+// Note: there isn't a server-local equivalent of this data source because
+// "agentless" proxies obtain certificates via SDS served by consul-dataplane.
 func CacheCARoots(c *cache.Cache) proxycfg.CARoots {
 	return &cacheProxyDataSource[*structs.DCSpecificRequest]{c, cachetype.ConnectCARootName}
 }
 
-// CacheConfigEntry satisfies the proxycfg.ConfigEntry interface by sourcing
-// data from the agent cache.
-func CacheConfigEntry(c *cache.Cache) proxycfg.ConfigEntry {
-	return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryName}
-}
-
-// CacheConfigEntryList satisfies the proxycfg.ConfigEntryList interface by
-// sourcing data from the agent cache.
-func CacheConfigEntryList(c *cache.Cache) proxycfg.ConfigEntryList {
-	return &cacheProxyDataSource[*structs.ConfigEntryQuery]{c, cachetype.ConfigEntryListName}
-}
-
 // CacheDatacenters satisfies the proxycfg.Datacenters interface by sourcing
 // data from the agent cache.
+//
+// Note: there isn't a server-local equivalent of this data source because it
+// relies on polling (so a more efficient method isn't available).
 func CacheDatacenters(c *cache.Cache) proxycfg.Datacenters {
 	return &cacheProxyDataSource[*structs.DatacentersRequest]{c, cachetype.CatalogDatacentersName}
 }
@@ -64,46 +75,31 @@ func CacheServiceGateways(c *cache.Cache) proxycfg.GatewayServices {
 
 // CacheHTTPChecks satisifies the proxycfg.HTTPChecks interface by sourcing
 // data from the agent cache.
+//
+// Note: there isn't a server-local equivalent of this data source because only
+// services registered to the local agent can be health checked by it.
 func CacheHTTPChecks(c *cache.Cache) proxycfg.HTTPChecks {
 	return &cacheProxyDataSource[*cachetype.ServiceHTTPChecksRequest]{c, cachetype.ServiceHTTPChecksName}
 }
 
-// CacheIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface
-// by sourcing data from the agent cache.
-func CacheIntentionUpstreams(c *cache.Cache) proxycfg.IntentionUpstreams {
-	return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsName}
-}
-
-// CacheIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreamsDestination interface
-// by sourcing data from the agent cache.
-func CacheIntentionUpstreamsDestination(c *cache.Cache) proxycfg.IntentionUpstreams {
-	return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsDestinationName}
-}
-
-// CacheInternalServiceDump satisfies the proxycfg.InternalServiceDump
-// interface by sourcing data from the agent cache.
-func CacheInternalServiceDump(c *cache.Cache) proxycfg.InternalServiceDump {
-	return &cacheProxyDataSource[*structs.ServiceDumpRequest]{c, cachetype.InternalServiceDumpName}
-}
-
 // CacheLeafCertificate satisifies the proxycfg.LeafCertificate interface by
 // sourcing data from the agent cache.
+//
+// Note: there isn't a server-local equivalent of this data source because
+// "agentless" proxies obtain certificates via SDS served by consul-dataplane.
 func CacheLeafCertificate(c *cache.Cache) proxycfg.LeafCertificate {
 	return &cacheProxyDataSource[*cachetype.ConnectCALeafRequest]{c, cachetype.ConnectCALeafName}
 }
 
 // CachePrepraredQuery satisfies the proxycfg.PreparedQuery interface by
 // sourcing data from the agent cache.
+//
+// Note: there isn't a server-local equivalent of this data source because it
+// relies on polling (so a more efficient method isn't available).
 func CachePrepraredQuery(c *cache.Cache) proxycfg.PreparedQuery {
 	return &cacheProxyDataSource[*structs.PreparedQueryExecuteRequest]{c, cachetype.PreparedQueryName}
 }
 
-// CacheResolvedServiceConfig satisfies the proxycfg.ResolvedServiceConfig
-// interface by sourcing data from the agent cache.
-func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig {
-	return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName}
-}
-
 // cacheProxyDataSource implements a generic wrapper around the agent cache to
 // provide data to the proxycfg.Manager.
 type cacheProxyDataSource[ReqType cache.Request] struct {
@@ -131,6 +127,15 @@ func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback {
 	}
 }
 
+func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
+	return func(ctx context.Context, correlationID string, result ResultType, err error) {
+		select {
+		case ch <- newUpdateEvent(correlationID, result, err):
+		case <-ctx.Done():
+		}
+	}
+}
+
 func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent {
 	// This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError.
 	if acl.IsErrNotFound(err) {
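dispatchBlockingQueryUpdate above is a small generic adapter: it forwards each blocking-query result onto the update channel unless the watcher's context is already cancelled, so a dead consumer never blocks the producer. Below is a standalone sketch of the same select-based dispatch with stand-in types (event is a placeholder for proxycfg.UpdateEvent).

```go
package main

import (
	"context"
	"fmt"
)

// event is a stand-in for proxycfg.UpdateEvent.
type event struct {
	CorrelationID string
	Result        any
	Err           error
}

// dispatch returns a callback that forwards results to ch, giving up if the
// watcher's context is cancelled first.
func dispatch[T any](ch chan<- event) func(context.Context, string, T, error) {
	return func(ctx context.Context, correlationID string, result T, err error) {
		select {
		case ch <- event{CorrelationID: correlationID, Result: result, Err: err}:
		case <-ctx.Done():
		}
	}
}

func main() {
	ch := make(chan event, 1)
	cb := dispatch[[]string](ch)
	cb(context.Background(), "intention-upstreams", []string{"api", "billing"}, nil)
	fmt.Println(<-ch)
}
```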
@@ -5,20 +5,45 @@ import (
 
 	"github.com/hashicorp/go-memdb"
 
+	"github.com/hashicorp/consul/agent/cache"
+	cachetype "github.com/hashicorp/consul/agent/cache-types"
 	"github.com/hashicorp/consul/agent/consul/watch"
 	"github.com/hashicorp/consul/agent/proxycfg"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/agent/structs/aclfilter"
 )
 
+// CacheIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface
+// by sourcing upstreams for the given service, inferred from intentions, from
+// the agent cache.
+func CacheIntentionUpstreams(c *cache.Cache) proxycfg.IntentionUpstreams {
+	return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsName}
+}
+
+// CacheIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreams
+// interface by sourcing upstreams for the given destination, inferred from
+// intentions, from the agent cache.
+func CacheIntentionUpstreamsDestination(c *cache.Cache) proxycfg.IntentionUpstreams {
+	return &cacheProxyDataSource[*structs.ServiceSpecificRequest]{c, cachetype.IntentionUpstreamsDestinationName}
+}
+
 // ServerIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface
-// by sourcing data from a blocking query against the server's state store.
+// by sourcing upstreams for the given service, inferred from intentions, from
+// the server's state store.
 func ServerIntentionUpstreams(deps ServerDataSourceDeps) proxycfg.IntentionUpstreams {
-	return serverIntentionUpstreams{deps}
+	return serverIntentionUpstreams{deps, structs.IntentionTargetService}
+}
+
+// ServerIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreams
+// interface by sourcing upstreams for the given destination, inferred from
+// intentions, from the server's state store.
+func ServerIntentionUpstreamsDestination(deps ServerDataSourceDeps) proxycfg.IntentionUpstreams {
+	return serverIntentionUpstreams{deps, structs.IntentionTargetDestination}
 }
 
 type serverIntentionUpstreams struct {
 	deps   ServerDataSourceDeps
+	target structs.IntentionTargetType
 }
 
 func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
@@ -32,7 +57,7 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
 	}
 	defaultDecision := authz.IntentionDefaultAllow(nil)
 
-	index, services, err := store.IntentionTopology(ws, target, false, defaultDecision, structs.IntentionTargetService)
+	index, services, err := store.IntentionTopology(ws, target, false, defaultDecision, s.target)
 	if err != nil {
 		return 0, nil, err
 	}
@@ -51,12 +76,3 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
 		dispatchBlockingQueryUpdate[*structs.IndexedServiceList](ch),
 	)
 }
-
-func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
-	return func(ctx context.Context, correlationID string, result ResultType, err error) {
-		select {
-		case ch <- newUpdateEvent(correlationID, result, err):
-		case <-ctx.Done():
-		}
-	}
-}
@@ -0,0 +1,99 @@
+package proxycfgglue
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/go-bexpr"
+	"github.com/hashicorp/go-memdb"
+
+	"github.com/hashicorp/consul/agent/cache"
+	cachetype "github.com/hashicorp/consul/agent/cache-types"
+	"github.com/hashicorp/consul/agent/consul/watch"
+	"github.com/hashicorp/consul/agent/proxycfg"
+	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
+)
+
+// CacheInternalServiceDump satisfies the proxycfg.InternalServiceDump
+// interface by sourcing data from the agent cache.
+func CacheInternalServiceDump(c *cache.Cache) proxycfg.InternalServiceDump {
+	return &cacheInternalServiceDump{c}
+}
+
+// cacheInternalServiceDump wraps the underlying cache-type to return a simpler
+// subset of the response (as this is all we use in proxycfg).
+type cacheInternalServiceDump struct {
+	c *cache.Cache
+}
+
+func (c *cacheInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
+	dispatch := dispatchCacheUpdate(ch)
+
+	return c.c.NotifyCallback(ctx, cachetype.InternalServiceDumpName, req, correlationID,
+		func(ctx context.Context, event cache.UpdateEvent) {
+			if r, _ := event.Result.(*structs.IndexedNodesWithGateways); r != nil {
+				event.Result = &structs.IndexedCheckServiceNodes{
+					Nodes:     r.Nodes,
+					QueryMeta: r.QueryMeta,
+				}
+			}
+			dispatch(ctx, event)
+		})
+}
+
+// ServerInternalServiceDump satisfies the proxycfg.InternalServiceDump
+// interface by sourcing data from a blocking query against the server's
+// state store.
+func ServerInternalServiceDump(deps ServerDataSourceDeps, remoteSource proxycfg.InternalServiceDump) proxycfg.InternalServiceDump {
+	return &serverInternalServiceDump{deps, remoteSource}
+}
+
+type serverInternalServiceDump struct {
+	deps         ServerDataSourceDeps
+	remoteSource proxycfg.InternalServiceDump
+}
+
+func (s *serverInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
+	if req.Datacenter != s.deps.Datacenter {
+		return s.remoteSource.Notify(ctx, req, correlationID, ch)
+	}
+
+	filter, err := bexpr.CreateFilter(req.Filter, nil, structs.CheckServiceNodes{})
+	if err != nil {
+		return err
+	}
+
+	// This is just the small subset of the Internal.ServiceDump RPC handler used
+	// by proxycfg.
+	return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore,
+		func(ws memdb.WatchSet, store Store) (uint64, *structs.IndexedCheckServiceNodes, error) {
+			authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil)
+			if err != nil {
+				return 0, nil, err
+			}
+
+			idx, nodes, err := store.ServiceDump(ws, req.ServiceKind, req.UseServiceKind, &req.EnterpriseMeta, structs.DefaultPeerKeyword)
+			if err != nil {
+				return 0, nil, err
+			}
+
+			raw, err := filter.Execute(nodes)
+			if err != nil {
+				return 0, nil, fmt.Errorf("could not filter local service dump: %w", err)
+			}
+			nodes = raw.(structs.CheckServiceNodes)
+
+			aclfilter.New(authz, s.deps.Logger).Filter(&nodes)
+
+			return idx, &structs.IndexedCheckServiceNodes{
+				Nodes: nodes,
+				QueryMeta: structs.QueryMeta{
+					Index:   idx,
+					Backend: structs.QueryBackendBlocking,
+				},
+			}, nil
+		},
+		dispatchBlockingQueryUpdate[*structs.IndexedCheckServiceNodes](ch),
+	)
+}
@@ -0,0 +1,139 @@
+package proxycfgglue
+
+import (
+   "context"
+   "errors"
+   "fmt"
+   "testing"
+
+   "github.com/stretchr/testify/mock"
+   "github.com/stretchr/testify/require"
+
+   "github.com/hashicorp/consul/acl"
+   "github.com/hashicorp/consul/agent/consul/state"
+   "github.com/hashicorp/consul/agent/proxycfg"
+   "github.com/hashicorp/consul/agent/structs"
+)
+
+func TestServerInternalServiceDump(t *testing.T) {
+   t.Run("remote queries are delegated to the remote source", func(t *testing.T) {
+       ctx, cancel := context.WithCancel(context.Background())
+       t.Cleanup(cancel)
+
+       var (
+           req           = &structs.ServiceDumpRequest{Datacenter: "dc2"}
+           correlationID = "correlation-id"
+           ch            = make(chan<- proxycfg.UpdateEvent)
+           result        = errors.New("KABOOM")
+       )
+
+       remoteSource := newMockInternalServiceDump(t)
+       remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result)
+
+       dataSource := ServerInternalServiceDump(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource)
+       err := dataSource.Notify(ctx, req, correlationID, ch)
+       require.Equal(t, result, err)
+   })
+
+   t.Run("local queries are served from the state store", func(t *testing.T) {
+       ctx, cancel := context.WithCancel(context.Background())
+       t.Cleanup(cancel)
+
+       nextIndex := indexGenerator()
+
+       store := state.NewStateStore(nil)
+
+       services := []*structs.NodeService{
+           {
+               Service: "mgw",
+               Kind:    structs.ServiceKindMeshGateway,
+           },
+           {
+               Service: "web",
+               Kind:    structs.ServiceKindTypical,
+           },
+           {
+               Service: "db",
+               Kind:    structs.ServiceKindTypical,
+           },
+       }
+       for idx, service := range services {
+           require.NoError(t, store.EnsureRegistration(nextIndex(), &structs.RegisterRequest{
+               Node:    fmt.Sprintf("node-%d", idx),
+               Service: service,
+           }))
+       }
+
+       authz := newStaticResolver(
+           policyAuthorizer(t, `
+               service "mgw" { policy = "read" }
+               service "web" { policy = "read" }
+               service "db" { policy = "read" }
+               node_prefix "node-" { policy = "read" }
+           `),
+       )
+
+       dataSource := ServerInternalServiceDump(ServerDataSourceDeps{
+           GetStore:    func() Store { return store },
+           ACLResolver: authz,
+       }, nil)
+
+       t.Run("filter by kind", func(t *testing.T) {
+           eventCh := make(chan proxycfg.UpdateEvent)
+           require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{
+               ServiceKind:    structs.ServiceKindMeshGateway,
+               UseServiceKind: true,
+           }, "", eventCh))
+
+           result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
+           require.Len(t, result.Nodes, 1)
+           require.Equal(t, "mgw", result.Nodes[0].Service.Service)
+       })
+
+       t.Run("bexpr filtering", func(t *testing.T) {
+           eventCh := make(chan proxycfg.UpdateEvent)
+           require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{
+               QueryOptions: structs.QueryOptions{Filter: `Service.Service == "web"`},
+           }, "", eventCh))
+
+           result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
+           require.Len(t, result.Nodes, 1)
+           require.Equal(t, "web", result.Nodes[0].Service.Service)
+       })
+
+       t.Run("all services", func(t *testing.T) {
+           eventCh := make(chan proxycfg.UpdateEvent)
+           require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{}, "", eventCh))
+
+           result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
+           require.Len(t, result.Nodes, 3)
+       })
+
+       t.Run("access denied", func(t *testing.T) {
+           authz.SwapAuthorizer(acl.DenyAll())
+
+           eventCh := make(chan proxycfg.UpdateEvent)
+           require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{}, "", eventCh))
+
+           result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
+           require.Empty(t, result.Nodes)
+       })
+   })
+}
+
+func newMockInternalServiceDump(t *testing.T) *mockInternalServiceDump {
+   mock := &mockInternalServiceDump{}
+   mock.Mock.Test(t)
+
+   t.Cleanup(func() { mock.AssertExpectations(t) })
+
+   return mock
+}
+
+type mockInternalServiceDump struct {
+   mock.Mock
+}
+
+func (m *mockInternalServiceDump) Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
+   return m.Called(ctx, req, correlationID, ch).Error(0)
+}
@@ -0,0 +1,70 @@
+package proxycfgglue
+
+import (
+   "context"
+   "errors"
+
+   "github.com/hashicorp/go-memdb"
+
+   "github.com/hashicorp/consul/agent/cache"
+   cachetype "github.com/hashicorp/consul/agent/cache-types"
+   "github.com/hashicorp/consul/agent/configentry"
+   "github.com/hashicorp/consul/agent/consul/watch"
+   "github.com/hashicorp/consul/agent/proxycfg"
+   "github.com/hashicorp/consul/agent/structs"
+)
+
+// CacheResolvedServiceConfig satisfies the proxycfg.ResolvedServiceConfig
+// interface by sourcing data from the agent cache.
+func CacheResolvedServiceConfig(c *cache.Cache) proxycfg.ResolvedServiceConfig {
+   return &cacheProxyDataSource[*structs.ServiceConfigRequest]{c, cachetype.ResolvedServiceConfigName}
+}
+
+// ServerResolvedServiceConfig satisfies the proxycfg.ResolvedServiceConfig
+// interface by sourcing data from a blocking query against the server's state
+// store.
+func ServerResolvedServiceConfig(deps ServerDataSourceDeps, remoteSource proxycfg.ResolvedServiceConfig) proxycfg.ResolvedServiceConfig {
+   return &serverResolvedServiceConfig{deps, remoteSource}
+}
+
+type serverResolvedServiceConfig struct {
+   deps         ServerDataSourceDeps
+   remoteSource proxycfg.ResolvedServiceConfig
+}
+
+func (s *serverResolvedServiceConfig) Notify(ctx context.Context, req *structs.ServiceConfigRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
+   if req.Datacenter != s.deps.Datacenter {
+       return s.remoteSource.Notify(ctx, req, correlationID, ch)
+   }
+
+   if len(req.Upstreams) != 0 {
+       return errors.New("ServerResolvedServiceConfig does not support the legacy Upstreams parameter")
+   }
+
+   return watch.ServerLocalNotify(ctx, correlationID, s.deps.GetStore,
+       func(ws memdb.WatchSet, store Store) (uint64, *structs.ServiceConfigResponse, error) {
+           authz, err := s.deps.ACLResolver.ResolveTokenAndDefaultMeta(req.Token, &req.EnterpriseMeta, nil)
+           if err != nil {
+               return 0, nil, err
+           }
+
+           if err := authz.ToAllowAuthorizer().ServiceReadAllowed(req.Name, nil); err != nil {
+               return 0, nil, err
+           }
+
+           idx, entries, err := store.ReadResolvedServiceConfigEntries(ws, req.Name, &req.EnterpriseMeta, req.UpstreamIDs, req.Mode)
+           if err != nil {
+               return 0, nil, err
+           }
+
+           reply, err := configentry.ComputeResolvedServiceConfig(req, req.UpstreamIDs, false, entries, s.deps.Logger)
+           if err != nil {
+               return 0, nil, err
+           }
+           reply.Index = idx
+
+           return idx, reply, nil
+       },
+       dispatchBlockingQueryUpdate[*structs.ServiceConfigResponse](ch),
+   )
+}
@@ -0,0 +1,116 @@
+package proxycfgglue
+
+import (
+   "context"
+   "errors"
+   "fmt"
+   "testing"
+
+   "github.com/stretchr/testify/mock"
+   "github.com/stretchr/testify/require"
+
+   "github.com/hashicorp/consul/acl"
+   "github.com/hashicorp/consul/agent/consul/state"
+   "github.com/hashicorp/consul/agent/proxycfg"
+   "github.com/hashicorp/consul/agent/structs"
+   "github.com/hashicorp/consul/sdk/testutil"
+)
+
+func TestServerResolvedServiceConfig(t *testing.T) {
+   t.Run("remote queries are delegated to the remote source", func(t *testing.T) {
+       var (
+           ctx           = context.Background()
+           req           = &structs.ServiceConfigRequest{Datacenter: "dc2"}
+           correlationID = "correlation-id"
+           ch            = make(chan<- proxycfg.UpdateEvent)
+           result        = errors.New("KABOOM")
+       )
+
+       remoteSource := newMockResolvedServiceConfig(t)
+       remoteSource.On("Notify", ctx, req, correlationID, ch).Return(result)
+
+       dataSource := ServerResolvedServiceConfig(ServerDataSourceDeps{Datacenter: "dc1"}, remoteSource)
+       err := dataSource.Notify(ctx, req, correlationID, ch)
+       require.Equal(t, result, err)
+   })
+
+   t.Run("local queries are served from the state store", func(t *testing.T) {
+       ctx, cancel := context.WithCancel(context.Background())
+       t.Cleanup(cancel)
+
+       const (
+           serviceName = "web"
+           datacenter  = "dc1"
+       )
+
+       store := state.NewStateStore(nil)
+       nextIndex := indexGenerator()
+
+       require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ServiceConfigEntry{
+           Name:     serviceName,
+           Protocol: "http",
+       }))
+
+       authz := newStaticResolver(
+           policyAuthorizer(t, fmt.Sprintf(`service "%s" { policy = "read" }`, serviceName)),
+       )
+
+       dataSource := ServerResolvedServiceConfig(ServerDataSourceDeps{
+           Datacenter:  datacenter,
+           ACLResolver: authz,
+           GetStore:    func() Store { return store },
+       }, nil)
+
+       eventCh := make(chan proxycfg.UpdateEvent)
+       require.NoError(t, dataSource.Notify(ctx, &structs.ServiceConfigRequest{Datacenter: datacenter, Name: serviceName}, "", eventCh))
+
+       testutil.RunStep(t, "initial state", func(t *testing.T) {
+           result := getEventResult[*structs.ServiceConfigResponse](t, eventCh)
+           require.Equal(t, map[string]any{"protocol": "http"}, result.ProxyConfig)
+       })
+
+       testutil.RunStep(t, "write proxy defaults", func(t *testing.T) {
+           require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ProxyConfigEntry{
+               Name: structs.ProxyConfigGlobal,
+               Mode: structs.ProxyModeDirect,
+           }))
+           result := getEventResult[*structs.ServiceConfigResponse](t, eventCh)
+           require.Equal(t, structs.ProxyModeDirect, result.Mode)
+       })
+
+       testutil.RunStep(t, "delete service config", func(t *testing.T) {
+           require.NoError(t, store.DeleteConfigEntry(nextIndex(), structs.ServiceDefaults, serviceName, nil))
+
+           result := getEventResult[*structs.ServiceConfigResponse](t, eventCh)
+           require.Empty(t, result.ProxyConfig)
+       })
+
+       testutil.RunStep(t, "revoke access", func(t *testing.T) {
+           authz.SwapAuthorizer(acl.DenyAll())
+
+           require.NoError(t, store.EnsureConfigEntry(nextIndex(), &structs.ServiceConfigEntry{
+               Name:     serviceName,
+               Protocol: "http",
+           }))
+
+           expectNoEvent(t, eventCh)
+       })
+   })
+}
+
+func newMockResolvedServiceConfig(t *testing.T) *mockResolvedServiceConfig {
+   mock := &mockResolvedServiceConfig{}
+   mock.Mock.Test(t)
+
+   t.Cleanup(func() { mock.AssertExpectations(t) })
+
+   return mock
+}
+
+type mockResolvedServiceConfig struct {
+   mock.Mock
+}
+
+func (m *mockResolvedServiceConfig) Notify(ctx context.Context, req *structs.ServiceConfigRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error {
+   return m.Called(ctx, req, correlationID, ch).Error(0)
+}
@@ -280,16 +280,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
            }
        }
        snap.Roots = roots
 
-   case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
-       resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
-       if !ok {
-           return fmt.Errorf("invalid type for response: %T", u.Result)
-       }
-       peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
-       if resp.Bundle != nil {
-           snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
-       }
-
    case u.CorrelationID == peeringTrustBundlesWatchID:
        resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
        if !ok {
@@ -369,6 +359,17 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
        // Clean up data
        //
 
+       peeredChainTargets := make(map[UpstreamID]struct{})
+       for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
+           for _, target := range discoChain.Targets {
+               if target.Peer == "" {
+                   continue
+               }
+               uid := NewUpstreamIDFromTargetID(target.ID)
+               peeredChainTargets[uid] = struct{}{}
+           }
+       }
+
        validPeerNames := make(map[string]struct{})
 
        // Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
@@ -383,6 +384,11 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
            validPeerNames[uid.Peer] = struct{}{}
            return true
        }
+       // Peered upstream came from a discovery chain target
+       if _, ok := peeredChainTargets[uid]; ok {
+           validPeerNames[uid.Peer] = struct{}{}
+           return true
+       }
        snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
        return true
    })
@@ -463,8 +469,14 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
            continue
        }
        if _, ok := seenUpstreams[uid]; !ok {
-           for _, cancelFn := range targets {
+           for targetID, cancelFn := range targets {
                cancelFn()
+
+               targetUID := NewUpstreamIDFromTargetID(targetID)
+               if targetUID.Peer != "" {
+                   snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
+                   snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
+               }
            }
            delete(snap.ConnectProxy.WatchedUpstreams, uid)
        }
@@ -89,10 +89,10 @@ type DataSources struct {
 
    // IntentionUpstreamsDestination provides intention-inferred upstream updates on a
    // notification channel.
-   IntentionUpstreamsDestination IntentionUpstreamsDestination
+   IntentionUpstreamsDestination IntentionUpstreams
 
-   // InternalServiceDump provides updates about a (gateway) service on a
-   // notification channel.
+   // InternalServiceDump provides updates about services of a given kind (e.g.
+   // mesh gateways) on a notification channel.
    InternalServiceDump InternalServiceDump
 
    // LeafCertificate provides updates about the service's leaf certificate on a
@@ -197,14 +197,8 @@ type IntentionUpstreams interface {
    Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error
 }
 
-// IntentionUpstreamsDestination is the interface used to consume updates about upstreams destination
-// inferred from service intentions.
-type IntentionUpstreamsDestination interface {
-   Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- UpdateEvent) error
-}
-
-// InternalServiceDump is the interface used to consume updates about a (gateway)
-// service via the internal ServiceDump RPC.
+// InternalServiceDump is the interface used to consume updates about services
+// of a given kind (e.g. mesh gateways).
 type InternalServiceDump interface {
    Notify(ctx context.Context, req *structs.ServiceDumpRequest, correlationID string, ch chan<- UpdateEvent) error
 }
@@ -5,7 +5,9 @@ import (
    "fmt"
 
    cachetype "github.com/hashicorp/consul/agent/cache-types"
+   "github.com/hashicorp/consul/agent/proxycfg/internal/watch"
    "github.com/hashicorp/consul/agent/structs"
+   "github.com/hashicorp/consul/proto/pbpeering"
 )
 
 type handlerIngressGateway struct {
@@ -66,6 +68,9 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot, error) {
    snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
    snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
    snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener)
+   snap.IngressGateway.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]()
+   snap.IngressGateway.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
+   snap.IngressGateway.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
    return snap, nil
 }
 
@@ -152,6 +157,12 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
            delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
            delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
            cancelUpstreamFn()
+
+           targetUID := NewUpstreamIDFromTargetID(targetID)
+           if targetUID.Peer != "" {
+               snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
+               snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
+           }
        }
 
        cancelFn()
@@ -491,7 +491,7 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
        }
 
    case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
-       resp, ok := u.Result.(*structs.IndexedNodesWithGateways)
+       resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
        if !ok {
            return fmt.Errorf("invalid type for response: %T", u.Result)
        }
@@ -814,6 +814,18 @@ func (s *ConfigSnapshot) MeshConfigTLSOutgoing() *structs.MeshDirectionalTLSConfig {
    return mesh.TLS.Outgoing
 }
 
+func (s *ConfigSnapshot) ToConfigSnapshotUpstreams() (*ConfigSnapshotUpstreams, error) {
+   switch s.Kind {
+   case structs.ServiceKindConnectProxy:
+       return &s.ConnectProxy.ConfigSnapshotUpstreams, nil
+   case structs.ServiceKindIngressGateway:
+       return &s.IngressGateway.ConfigSnapshotUpstreams, nil
+   default:
+       // This is a coherence check and should never fail
+       return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", s.Kind)
+   }
+}
+
 func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.PeeringServiceMeta {
    nodes, _ := u.PeerUpstreamEndpoints.Get(uid)
    if len(nodes) == 0 {
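For orientation, here is a minimal sketch (not part of the change itself) of how a caller can use the `ToConfigSnapshotUpstreams` helper added above instead of switching on `snap.Kind` directly; package name and wrapper function are illustrative, the types come from this diff:

```go
package sketch

import "github.com/hashicorp/consul/agent/proxycfg"

// upstreamsFromSnapshot is a hypothetical helper showing the intended usage:
// only connect-proxy and ingress-gateway snapshots embed upstream state, so
// any other kind surfaces the coherence error introduced above.
func upstreamsFromSnapshot(snap *proxycfg.ConfigSnapshot) (*proxycfg.ConfigSnapshotUpstreams, error) {
	return snap.ToConfigSnapshotUpstreams()
}
```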
@@ -493,6 +493,11 @@ func TestState_WatchesAndUpdates(t *testing.T) {
                    Mode: structs.MeshGatewayModeNone,
                },
            },
+           structs.Upstream{
+               DestinationType: structs.UpstreamDestTypeService,
+               DestinationName: "api-failover-to-peer",
+               LocalBindPort:   10007,
+           },
            structs.Upstream{
                DestinationType: structs.UpstreamDestTypeService,
                DestinationName: "api-dc2",
@@ -552,6 +557,16 @@ func TestState_WatchesAndUpdates(t *testing.T) {
                    Mode: structs.MeshGatewayModeNone,
                },
            }),
+           fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
+               Name:                 "api-failover-to-peer",
+               EvaluateInDatacenter: "dc1",
+               EvaluateInNamespace:  "default",
+               EvaluateInPartition:  "default",
+               Datacenter:           "dc1",
+               OverrideMeshGateway: structs.MeshGatewayConfig{
+                   Mode: meshGatewayProxyConfigValue,
+               },
+           }),
            fmt.Sprintf("discovery-chain:%s-dc2", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
                Name:                 "api-dc2",
                EvaluateInDatacenter: "dc1",
@@ -639,6 +654,26 @@ func TestState_WatchesAndUpdates(t *testing.T) {
                },
                Err: nil,
            },
+           {
+               CorrelationID: fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()),
+               Result: &structs.DiscoveryChainResponse{
+                   Chain: discoverychain.TestCompileConfigEntries(t, "api-failover-to-peer", "default", "default", "dc1", "trustdomain.consul",
+                       func(req *discoverychain.CompileRequest) {
+                           req.OverrideMeshGateway.Mode = meshGatewayProxyConfigValue
+                       }, &structs.ServiceResolverConfigEntry{
+                           Kind: structs.ServiceResolver,
+                           Name: "api-failover-to-peer",
+                           Failover: map[string]structs.ServiceResolverFailover{
+                               "*": {
+                                   Targets: []structs.ServiceResolverFailoverTarget{
+                                       {Peer: "cluster-01"},
+                                   },
+                               },
+                           },
+                       }),
+               },
+               Err: nil,
+           },
        },
        verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
            require.True(t, snap.Valid())
@@ -646,15 +681,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
            require.Equal(t, indexedRoots, snap.Roots)
 
            require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
-           require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
-           require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
-           require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
-           require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
-           require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
+           require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
+           require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
+           require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
+           require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
+           require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
 
            require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
            require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
+
+           require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
+           require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
 
            require.True(t, snap.ConnectProxy.IntentionsSet)
            require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
            require.True(t, snap.ConnectProxy.MeshConfigSet)
@@ -667,6 +705,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
            fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true),
            fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true),
            fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true),
+           upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()): genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true),
            fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc2"),
            fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc1"),
        },
@@ -676,15 +715,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
            require.Equal(t, indexedRoots, snap.Roots)
 
            require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
-           require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
-           require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
-           require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
-           require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
-           require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
+           require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
+           require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
+           require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
+           require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
+           require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
 
            require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
            require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
+
+           require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
+           require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
 
            require.True(t, snap.ConnectProxy.IntentionsSet)
            require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
        },
@@ -885,7 +927,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
        events: []UpdateEvent{
            {
                CorrelationID: "mesh-gateway:dc4",
-               Result: &structs.IndexedNodesWithGateways{
+               Result: &structs.IndexedCheckServiceNodes{
                    Nodes: TestGatewayNodesDC4Hostname(t),
                },
                Err: nil,
@@ -280,6 +280,31 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes {
        }
    }
 
+func TestUpstreamNodesPeerCluster01(t testing.T) structs.CheckServiceNodes {
+   peer := "cluster-01"
+   service := structs.TestNodeServiceWithNameInPeer(t, "web", peer)
+   return structs.CheckServiceNodes{
+       structs.CheckServiceNode{
+           Node: &structs.Node{
+               ID:       "test1",
+               Node:     "test1",
+               Address:  "10.40.1.1",
+               PeerName: peer,
+           },
+           Service: service,
+       },
+       structs.CheckServiceNode{
+           Node: &structs.Node{
+               ID:       "test2",
+               Node:     "test2",
+               Address:  "10.40.1.2",
+               PeerName: peer,
+           },
+           Service: service,
+       },
+   }
+}
+
 func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
    return structs.CheckServiceNodes{
        structs.CheckServiceNode{
@@ -949,7 +974,7 @@ func NewTestDataSources() *TestDataSources {
        Intentions: NewTestDataSource[*structs.ServiceSpecificRequest, structs.Intentions](),
        IntentionUpstreams: NewTestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList](),
        IntentionUpstreamsDestination: NewTestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList](),
-       InternalServiceDump: NewTestDataSource[*structs.ServiceDumpRequest, *structs.IndexedNodesWithGateways](),
+       InternalServiceDump: NewTestDataSource[*structs.ServiceDumpRequest, *structs.IndexedCheckServiceNodes](),
        LeafCertificate: NewTestDataSource[*cachetype.ConnectCALeafRequest, *structs.IssuedCert](),
        PreparedQuery: NewTestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse](),
        ResolvedServiceConfig: NewTestDataSource[*structs.ServiceConfigRequest, *structs.ServiceConfigResponse](),
@@ -975,7 +1000,7 @@ type TestDataSources struct {
    Intentions *TestDataSource[*structs.ServiceSpecificRequest, structs.Intentions]
    IntentionUpstreams *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList]
    IntentionUpstreamsDestination *TestDataSource[*structs.ServiceSpecificRequest, *structs.IndexedServiceList]
-   InternalServiceDump *TestDataSource[*structs.ServiceDumpRequest, *structs.IndexedNodesWithGateways]
+   InternalServiceDump *TestDataSource[*structs.ServiceDumpRequest, *structs.IndexedCheckServiceNodes]
    LeafCertificate *TestDataSource[*cachetype.ConnectCALeafRequest, *structs.IssuedCert]
    PeeredUpstreams *TestDataSource[*structs.PartitionSpecificRequest, *structs.IndexedPeeredServiceList]
    PreparedQuery *TestDataSource[*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse]
@@ -316,19 +316,19 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st
        baseEvents = testSpliceEvents(baseEvents, []UpdateEvent{
            {
                CorrelationID: "mesh-gateway:dc2",
-               Result: &structs.IndexedNodesWithGateways{
+               Result: &structs.IndexedCheckServiceNodes{
                    Nodes: TestGatewayNodesDC2(t),
                },
            },
            {
                CorrelationID: "mesh-gateway:dc4",
-               Result: &structs.IndexedNodesWithGateways{
+               Result: &structs.IndexedCheckServiceNodes{
                    Nodes: TestGatewayNodesDC4Hostname(t),
                },
            },
            {
                CorrelationID: "mesh-gateway:dc6",
-               Result: &structs.IndexedNodesWithGateways{
+               Result: &structs.IndexedCheckServiceNodes{
                    Nodes: TestGatewayNodesDC6Hostname(t),
                },
            },
@@ -376,7 +376,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st
                // Have the cross-dc query mechanism not work for dc2 so
                // fedstates will infill.
                CorrelationID: "mesh-gateway:dc2",
-               Result: &structs.IndexedNodesWithGateways{
+               Result: &structs.IndexedCheckServiceNodes{
                    Nodes: nil,
                },
            },
@@ -8,6 +8,7 @@ import (
    "github.com/hashicorp/consul/agent/connect"
    "github.com/hashicorp/consul/agent/consul/discoverychain"
    "github.com/hashicorp/consul/agent/structs"
+   "github.com/hashicorp/consul/proto/pbpeering"
 )
 
 func setupTestVariationConfigEntriesAndSnapshot(
@@ -68,10 +69,28 @@ func setupTestVariationConfigEntriesAndSnapshot(
        })
        events = append(events, UpdateEvent{
            CorrelationID: "mesh-gateway:dc2:" + dbUID.String(),
-           Result: &structs.IndexedNodesWithGateways{
+           Result: &structs.IndexedCheckServiceNodes{
                Nodes: TestGatewayNodesDC2(t),
            },
        })
+   case "failover-to-cluster-peer":
+       events = append(events, UpdateEvent{
+           CorrelationID: "peer-trust-bundle:cluster-01",
+           Result: &pbpeering.TrustBundleReadResponse{
+               Bundle: &pbpeering.PeeringTrustBundle{
+                   PeerName:          "peer1",
+                   TrustDomain:       "peer1.domain",
+                   ExportedPartition: "peer1ap",
+                   RootPEMs:          []string{"peer1-root-1"},
+               },
+           },
+       })
+       events = append(events, UpdateEvent{
+           CorrelationID: "upstream-peer:db?peer=cluster-01",
+           Result: &structs.IndexedCheckServiceNodes{
+               Nodes: TestUpstreamNodesPeerCluster01(t),
+           },
+       })
    case "failover-through-double-remote-gateway-triggered":
        events = append(events, UpdateEvent{
            CorrelationID: "upstream-target:db.default.default.dc1:" + dbUID.String(),
@@ -95,13 +114,13 @@ func setupTestVariationConfigEntriesAndSnapshot(
        })
        events = append(events, UpdateEvent{
            CorrelationID: "mesh-gateway:dc2:" + dbUID.String(),
-           Result: &structs.IndexedNodesWithGateways{
+           Result: &structs.IndexedCheckServiceNodes{
                Nodes: TestGatewayNodesDC2(t),
            },
        })
        events = append(events, UpdateEvent{
            CorrelationID: "mesh-gateway:dc3:" + dbUID.String(),
-           Result: &structs.IndexedNodesWithGateways{
+           Result: &structs.IndexedCheckServiceNodes{
                Nodes: TestGatewayNodesDC3(t),
            },
        })
@@ -122,7 +141,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
        })
        events = append(events, UpdateEvent{
            CorrelationID: "mesh-gateway:dc1:" + dbUID.String(),
-           Result: &structs.IndexedNodesWithGateways{
+           Result: &structs.IndexedCheckServiceNodes{
                Nodes: TestGatewayNodesDC1(t),
            },
        })
@@ -149,7 +168,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
        })
        events = append(events, UpdateEvent{
            CorrelationID: "mesh-gateway:dc1:" + dbUID.String(),
-           Result: &structs.IndexedNodesWithGateways{
+           Result: &structs.IndexedCheckServiceNodes{
                Nodes: TestGatewayNodesDC1(t),
            },
        })
@@ -255,6 +274,21 @@ func setupTestVariationDiscoveryChain(
                },
            },
        )
+   case "failover-to-cluster-peer":
+       entries = append(entries,
+           &structs.ServiceResolverConfigEntry{
+               Kind:           structs.ServiceResolver,
+               Name:           "db",
+               ConnectTimeout: 33 * time.Second,
+               Failover: map[string]structs.ServiceResolverFailover{
+                   "*": {
+                       Targets: []structs.ServiceResolverFailoverTarget{
+                           {Peer: "cluster-01"},
+                       },
+                   },
+               },
+           },
+       )
    case "failover-through-double-remote-gateway-triggered":
        fallthrough
    case "failover-through-double-remote-gateway":
@@ -9,7 +9,9 @@ import (
    "github.com/mitchellh/mapstructure"
 
    "github.com/hashicorp/consul/acl"
+   cachetype "github.com/hashicorp/consul/agent/cache-types"
    "github.com/hashicorp/consul/agent/structs"
+   "github.com/hashicorp/consul/proto/pbpeering"
 )
 
 type handlerUpstreams struct {
@@ -21,9 +23,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
        return fmt.Errorf("error filling agent cache: %v", u.Err)
    }
 
-   upstreamsSnapshot := &snap.ConnectProxy.ConfigSnapshotUpstreams
-   if snap.Kind == structs.ServiceKindIngressGateway {
-       upstreamsSnapshot = &snap.IngressGateway.ConfigSnapshotUpstreams
+   upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams()
+
+   if err != nil {
+       return err
    }
 
    switch {
@@ -98,19 +101,16 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
 
        uid := UpstreamIDFromString(uidString)
 
-       filteredNodes := hostnameEndpoints(
-           s.logger,
-           GatewayKey{ /*empty so it never matches*/ },
-           resp.Nodes,
-       )
-       if len(filteredNodes) > 0 {
-           if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
-               upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
-           }
-       } else {
-           if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, resp.Nodes); set {
-               delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
-           }
+       s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes)
+
+   case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
+       resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
+       if !ok {
+           return fmt.Errorf("invalid type for response: %T", u.Result)
+       }
+       peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
+       if resp.Bundle != nil {
+           upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
        }
 
    case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
@@ -186,7 +186,7 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
        }
 
    case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
-       resp, ok := u.Result.(*structs.IndexedNodesWithGateways)
+       resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
        if !ok {
            return fmt.Errorf("invalid type for response: %T", u.Result)
        }
@@ -216,6 +216,23 @@ func removeColonPrefix(s string) (string, string, bool) {
    return s[0:idx], s[idx+1:], true
 }
 
+func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) {
+   filteredNodes := hostnameEndpoints(
+       s.logger,
+       GatewayKey{ /*empty so it never matches*/ },
+       nodes,
+   )
+   if len(filteredNodes) > 0 {
+       if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
+           upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
+       }
+   } else {
+       if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set {
+           delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
+       }
+   }
+}
+
 func (s *handlerUpstreams) resetWatchesFromChain(
    ctx context.Context,
    uid UpstreamID,
@@ -255,6 +272,12 @@ func (s *handlerUpstreams) resetWatchesFromChain(
        delete(snap.WatchedUpstreams[uid], targetID)
        delete(snap.WatchedUpstreamEndpoints[uid], targetID)
        cancelFn()
+
+       targetUID := NewUpstreamIDFromTargetID(targetID)
+       if targetUID.Peer != "" {
+           snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
+           snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
+       }
    }
 
    var (
@@ -274,6 +297,7 @@ func (s *handlerUpstreams) resetWatchesFromChain(
            service: target.Service,
            filter: target.Subset.Filter,
            datacenter: target.Datacenter,
+           peer: target.Peer,
            entMeta: target.GetEnterpriseMetadata(),
        }
        err := s.watchUpstreamTarget(ctx, snap, opts)
@@ -384,6 +408,7 @@ type targetWatchOpts struct {
    service string
    filter string
    datacenter string
+   peer string
    entMeta *acl.EnterpriseMeta
 }
 
@@ -397,11 +422,17 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
    var finalMeta acl.EnterpriseMeta
    finalMeta.Merge(opts.entMeta)
 
-   correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String()
+   uid := opts.upstreamID
+   correlationID := "upstream-target:" + opts.chainID + ":" + uid.String()
+
+   if opts.peer != "" {
+       uid = NewUpstreamIDFromTargetID(opts.chainID)
+       correlationID = upstreamPeerWatchIDPrefix + uid.String()
+   }
 
    ctx, cancel := context.WithCancel(ctx)
    err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
-       PeerName:   opts.upstreamID.Peer,
+       PeerName:   opts.peer,
        Datacenter: opts.datacenter,
        QueryOptions: structs.QueryOptions{
            Token: s.token,
@@ -422,6 +453,31 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
    }
    snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel
 
+   if uid.Peer == "" {
+       return nil
+   }
+
+   if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
+       snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
+   }
+
+   // Check whether a watch for this peer exists to avoid duplicates.
+   if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
+       peerCtx, cancel := context.WithCancel(ctx)
+       if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
+           Request: &pbpeering.TrustBundleReadRequest{
+               Name:      uid.Peer,
+               Partition: uid.PartitionOrDefault(),
+           },
+           QueryOptions: structs.QueryOptions{Token: s.token},
+       }, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
+           cancel()
+           return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
+       }
+
+       snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
+   }
+
    return nil
 }
 
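To make the failover-to-peer wiring concrete, a minimal sketch of the kind of service-resolver entry these watches react to; it mirrors the "failover-to-cluster-peer" test fixture earlier in this diff, and the service name, peer name, and timeout are illustrative values only:

```go
package sketch

import (
	"time"

	"github.com/hashicorp/consul/agent/structs"
)

// peerFailoverResolver builds a service-resolver config entry whose wildcard
// failover target is a cluster peer, matching the fixture used in this diff.
func peerFailoverResolver() *structs.ServiceResolverConfigEntry {
	return &structs.ServiceResolverConfigEntry{
		Kind:           structs.ServiceResolver,
		Name:           "db",
		ConnectTimeout: 33 * time.Second,
		Failover: map[string]structs.ServiceResolverFailover{
			"*": {
				Targets: []structs.ServiceResolverFailoverTarget{
					{Peer: "cluster-01"},
				},
			},
		},
	}
}
```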
@ -726,11 +726,12 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !existing.IsActive() {
|
if existing == nil || existing.State == pbpeering.PeeringState_DELETING {
|
||||||
// Return early when the Peering doesn't exist or is already marked for deletion.
|
// Return early when the Peering doesn't exist or is already marked for deletion.
|
||||||
// We don't return nil because the pb will fail to marshal.
|
// We don't return nil because the pb will fail to marshal.
|
||||||
return &pbpeering.PeeringDeleteResponse{}, nil
|
return &pbpeering.PeeringDeleteResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// We are using a write request due to needing to perform a deferred deletion.
|
// We are using a write request due to needing to perform a deferred deletion.
|
||||||
// The peering gets marked for deletion by setting the DeletedAt field,
|
// The peering gets marked for deletion by setting the DeletedAt field,
|
||||||
// and a leader routine will handle deleting the peering.
|
// and a leader routine will handle deleting the peering.
|
||||||
|
|
|
@@ -621,13 +621,20 @@ func TestPeeringService_Read_ACLEnforcement(t *testing.T) {
 }
 
 func TestPeeringService_Delete(t *testing.T) {
+	tt := map[string]pbpeering.PeeringState{
+		"active peering":     pbpeering.PeeringState_ACTIVE,
+		"terminated peering": pbpeering.PeeringState_TERMINATED,
+	}
+
+	for name, overrideState := range tt {
+		t.Run(name, func(t *testing.T) {
 	// TODO(peering): see note on newTestServer, refactor to not use this
 	s := newTestServer(t, nil)
 
+	// A pointer is kept for the following peering so that we can modify the object without another PeeringWrite.
 	p := &pbpeering.Peering{
 		ID:                  testUUID(t),
 		Name:                "foo",
-		State:               pbpeering.PeeringState_ESTABLISHING,
 		PeerCAPems:          nil,
 		PeerServerName:      "test",
 		PeerServerAddresses: []string{"addr1"},
@@ -637,6 +644,9 @@ func TestPeeringService_Delete(t *testing.T) {
 	require.Nil(t, p.DeletedAt)
 	require.True(t, p.IsActive())
 
+	// Overwrite the peering state to simulate deleting from a non-initial state.
+	p.State = overrideState
+
 	client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
 
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -653,6 +663,8 @@ func TestPeeringService_Delete(t *testing.T) {
 		// routine will clean it up.
 		require.Nil(r, resp)
 	})
+	})
+	}
 }
 
 func TestPeeringService_Delete_ACLEnforcement(t *testing.T) {
@@ -2,6 +2,7 @@ package agent
 
 import (
 	"fmt"
+	"strings"
 	"time"
 
 	"github.com/hashicorp/consul/ipaddr"
@@ -13,6 +14,10 @@ func sidecarServiceID(serviceID string) string {
 	return serviceID + "-sidecar-proxy"
 }
 
+func serviceIDFromSidecarID(sidecarServiceID string) string {
+	return strings.Split(sidecarServiceID, "-")[0]
+}
+
 // sidecarServiceFromNodeService returns a *structs.NodeService representing a
 // sidecar service with all defaults populated based on the current agent
 // config.
@@ -30,7 +35,7 @@ func sidecarServiceID(serviceID string) string {
 // registration. This will be the same as the token parameter passed unless the
 // SidecarService definition contains a distinct one.
 // TODO: return AddServiceRequest
-func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token string) (*structs.NodeService, []*structs.CheckType, string, error) {
+func sidecarServiceFromNodeService(ns *structs.NodeService, token string) (*structs.NodeService, []*structs.CheckType, string, error) {
 	if ns.Connect.SidecarService == nil {
 		return nil, nil, "", nil
 	}
@@ -114,30 +119,18 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
 		}
 	}
 
-	if sidecar.Port < 1 {
-		port, err := a.sidecarPortFromServiceID(sidecar.CompoundServiceID())
-		if err != nil {
-			return nil, nil, "", err
-		}
-		sidecar.Port = port
-	}
-
 	// Setup checks
 	checks, err := ns.Connect.SidecarService.CheckTypes()
 	if err != nil {
 		return nil, nil, "", err
 	}
-	// Setup default check if none given
-	if len(checks) < 1 {
-		checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port)
-	}
-
 	return sidecar, checks, token, nil
 }
 
-// sidecarPortFromServiceID is used to allocate a unique port for a sidecar proxy.
+// sidecarPortFromServiceIDLocked is used to allocate a unique port for a sidecar proxy.
 // This is called immediately before registration to avoid value collisions. This function assumes the state lock is already held.
-func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.ServiceID) (int, error) {
+func (a *Agent) sidecarPortFromServiceIDLocked(sidecarCompoundServiceID structs.ServiceID) (int, error) {
 	sidecarPort := 0
 
 	// Allocate port if needed (min and max inclusive).
@@ -202,14 +195,23 @@ func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.Servic
 	return sidecarPort, nil
 }
 
-func sidecarDefaultChecks(serviceID string, localServiceAddress string, port int) []*structs.CheckType {
-	// Setup default check if none given
+func sidecarDefaultChecks(sidecarID string, sidecarAddress string, proxyServiceAddress string, port int) []*structs.CheckType {
+	// The check should use the sidecar's address because it makes a request to the sidecar.
+	// If the sidecar's address is empty, we fall back to the address of the local service, as set in
+	// sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address
+	// (which in most cases it is because it's running as a sidecar in the same network).
+	// We could instead fall back to the address of the service as set by (ns.Address), but I've kept it using
+	// sidecar.Proxy.LocalServiceAddress so as to not change things too much in the
+	// process of fixing #14433.
+	checkAddress := sidecarAddress
+	if checkAddress == "" {
+		checkAddress = proxyServiceAddress
+	}
+	serviceID := serviceIDFromSidecarID(sidecarID)
 	return []*structs.CheckType{
 		{
 			Name: "Connect Sidecar Listening",
-			// Default to localhost rather than agent/service public IP. The checks
-			// can always be overridden if a non-loopback IP is needed.
-			TCP:      ipaddr.FormatAddressPort(localServiceAddress, port),
+			TCP:      ipaddr.FormatAddressPort(checkAddress, port),
 			Interval: 10 * time.Second,
 		},
 		{
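The new signature makes the check-address selection explicit: prefer the sidecar's own address, otherwise fall back to the proxy's local service address. A minimal sketch of that behavior, mirroring the test added later in this diff (addresses and port are illustrative):

```go
// Sketch: sidecarDefaultChecks(sidecarID, sidecarAddress, proxyServiceAddress, port).
// When the sidecar has its own address, the default TCP check targets it.
checks := sidecarDefaultChecks("web1", "10.0.0.5", "127.0.0.1", 21000)
// checks[0].TCP == "10.0.0.5:21000"

// When the sidecar address is empty, the check falls back to the proxy's
// local service address.
checks = sidecarDefaultChecks("web1", "", "127.0.0.1", 21000)
// checks[0].TCP == "127.0.0.1:21000"
```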
@@ -54,7 +54,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 			Kind:    structs.ServiceKindConnectProxy,
 			ID:      "web1-sidecar-proxy",
 			Service: "web-sidecar-proxy",
-			Port:    2222,
+			Port:    0,
 			LocallyRegisteredAsSidecar: true,
 			Proxy: structs.ConnectProxyConfig{
 				DestinationServiceName: "web",
@@ -63,17 +63,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 				LocalServicePort: 1111,
 			},
 		},
-		wantChecks: []*structs.CheckType{
-			{
-				Name:     "Connect Sidecar Listening",
-				TCP:      "127.0.0.1:2222",
-				Interval: 10 * time.Second,
-			},
-			{
-				Name:         "Connect Sidecar Aliasing web1",
-				AliasService: "web1",
-			},
-		},
+		wantChecks: nil,
 		wantToken: "foo",
 		},
 		{
@@ -157,7 +147,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 			Kind:    structs.ServiceKindConnectProxy,
 			ID:      "web1-sidecar-proxy",
 			Service: "web-sidecar-proxy",
-			Port:    2222,
+			Port:    0,
 			Tags:    []string{"foo"},
 			Meta:    map[string]string{"foo": "bar"},
 			LocallyRegisteredAsSidecar: true,
@@ -168,17 +158,7 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 				LocalServicePort: 1111,
 			},
 		},
-		wantChecks: []*structs.CheckType{
-			{
-				Name:     "Connect Sidecar Listening",
-				TCP:      "127.0.0.1:2222",
-				Interval: 10 * time.Second,
-			},
-			{
-				Name:         "Connect Sidecar Aliasing web1",
-				AliasService: "web1",
-			},
-		},
+		wantChecks: nil,
 		},
 		{
 			name: "invalid check type",
@@ -218,20 +198,11 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			hcl := `
-			ports {
-				sidecar_min_port = 2222
-				sidecar_max_port = 2222
-			}
-			`
-			a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
-			defer a.Shutdown()
-
 			ns := tt.sd.NodeService()
 			err := ns.Validate()
 			require.NoError(t, err, "Invalid test case - NodeService must validate")
 
-			gotNS, gotChecks, gotToken, err := a.sidecarServiceFromNodeService(ns, tt.token)
+			gotNS, gotChecks, gotToken, err := sidecarServiceFromNodeService(ns, tt.token)
 			if tt.wantErr != "" {
 				require.Error(t, err)
 				require.Contains(t, err.Error(), tt.wantErr)
@@ -329,7 +300,7 @@ func TestAgent_SidecarPortFromServiceID(t *testing.T) {
 			}
 			`
 			}
-			a := StartTestAgent(t, TestAgent{Name: "jones", HCL: hcl})
+			a := NewTestAgent(t, hcl)
 			defer a.Shutdown()
 
 			if tt.preRegister != nil {
@@ -337,7 +308,7 @@ func TestAgent_SidecarPortFromServiceID(t *testing.T) {
 				require.NoError(t, err)
 			}
 
-			gotPort, err := a.sidecarPortFromServiceID(structs.ServiceID{ID: tt.serviceID, EnterpriseMeta: tt.enterpriseMeta})
+			gotPort, err := a.sidecarPortFromServiceIDLocked(structs.ServiceID{ID: tt.serviceID, EnterpriseMeta: tt.enterpriseMeta})
 
 			if tt.wantErr != "" {
 				require.Error(t, err)
@@ -350,3 +321,52 @@ func TestAgent_SidecarPortFromServiceID(t *testing.T) {
 		})
 	}
 }
+
+func TestAgent_SidecarDefaultChecks(t *testing.T) {
+	tests := []struct {
+		name                 string
+		svcAddress           string
+		proxyLocalSvcAddress string
+		port                 int
+		wantChecks           []*structs.CheckType
+	}{{
+		name:                 "uses proxy address for check",
+		svcAddress:           "123.123.123.123",
+		proxyLocalSvcAddress: "255.255.255.255",
+		port:                 2222,
+		wantChecks: []*structs.CheckType{
+			{
+				Name:     "Connect Sidecar Listening",
+				TCP:      "123.123.123.123:2222",
+				Interval: 10 * time.Second,
+			},
+			{
+				Name:         "Connect Sidecar Aliasing web1",
+				AliasService: "web1",
+			},
+		},
+	},
+		{
+			name:                 "uses proxy.local_service_address for check if proxy address is empty",
+			proxyLocalSvcAddress: "1.2.3.4",
+			port:                 2222,
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "1.2.3.4:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotChecks := sidecarDefaultChecks("web1", tt.svcAddress, tt.proxyLocalSvcAddress, tt.port)
+			require.Equal(t, tt.wantChecks, gotChecks)
+		})
+	}
+}
@@ -3,12 +3,13 @@ package structs
 import (
 	"errors"
 	"fmt"
-	"github.com/miekg/dns"
 	"net"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/miekg/dns"
+
 	"github.com/hashicorp/go-multierror"
 	"github.com/mitchellh/hashstructure"
 	"github.com/mitchellh/mapstructure"
@@ -362,6 +363,13 @@ func (e *ProxyConfigEntry) Normalize() error {
 	}
 
 	e.Kind = ProxyDefaults
 
+	// proxy default config only accepts global configs
+	// this check is replicated in normalize() and validate(),
+	// since validate is not called by all the endpoints (e.g., delete)
+	if e.Name != "" && e.Name != ProxyConfigGlobal {
+		return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal)
+	}
 	e.Name = ProxyConfigGlobal
 
 	e.EnterpriseMeta.Normalize()
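Normalize now rejects proxy-defaults entries whose name is anything other than `global`, so the check also applies on code paths that never call Validate, such as delete. A minimal sketch of the new behavior (the entry value is illustrative; the error text matches the test added further down):

```go
entry := &structs.ProxyConfigEntry{Kind: structs.ProxyDefaults, Name: "foo"}
if err := entry.Normalize(); err != nil {
	// err: invalid name ("foo"), only "global" is supported
	fmt.Println(err)
}
```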
@@ -961,6 +969,11 @@ type PassiveHealthCheck struct {
 	// MaxFailures is the count of consecutive failures that results in a host
 	// being removed from the pool.
 	MaxFailures uint32 `json:",omitempty" alias:"max_failures"`
+
+	// EnforcingConsecutive5xx is the % chance that a host will be actually ejected
+	// when an outlier status is detected through consecutive 5xx.
+	// This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
+	EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"`
 }
 
 func (chk *PassiveHealthCheck) Clone() *PassiveHealthCheck {
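For reference, a minimal sketch of how the new knob can be set through an upstream's opaque config map, mirroring the xDS test case added later in this diff (the numeric values are illustrative):

```go
// Sketch: upstream config as it would appear under proxy.upstreams[].config.
ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{
	"max_failures":              float64(5),
	"interval":                  float64(10),
	"enforcing_consecutive_5xx": float64(80), // eject only 80% of the time
}
```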
@@ -964,11 +964,18 @@ func (e *ServiceResolverConfigEntry) Validate() error {
 
 		// TODO(rb): prevent subsets and default subsets from being defined?
 
-		if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" {
+		if r.isEmpty() {
 			return fmt.Errorf("Redirect is empty")
 		}
 
-		if r.Service == "" {
+		switch {
+		case r.Peer != "" && r.ServiceSubset != "":
+			return fmt.Errorf("Redirect.Peer cannot be set with Redirect.ServiceSubset")
+		case r.Peer != "" && r.Partition != "":
+			return fmt.Errorf("Redirect.Partition cannot be set with Redirect.Peer")
+		case r.Peer != "" && r.Datacenter != "":
+			return fmt.Errorf("Redirect.Peer cannot be set with Redirect.Datacenter")
+		case r.Service == "":
 			if r.ServiceSubset != "" {
 				return fmt.Errorf("Redirect.ServiceSubset defined without Redirect.Service")
 			}
@@ -978,9 +985,12 @@ func (e *ServiceResolverConfigEntry) Validate() error {
 			if r.Partition != "" {
 				return fmt.Errorf("Redirect.Partition defined without Redirect.Service")
 			}
-		} else if r.Service == e.Name {
-			if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) {
-				return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service)
+			if r.Peer != "" {
+				return fmt.Errorf("Redirect.Peer defined without Redirect.Service")
+			}
+		case r.ServiceSubset != "" && (r.Service == "" || r.Service == e.Name):
+			if !isSubset(r.ServiceSubset) {
+				return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, e.Name)
 			}
 		}
 	}
@@ -1231,6 +1241,10 @@ type ServiceResolverRedirect struct {
 	// Datacenter is the datacenter to resolve the service from instead of the
 	// current one (optional).
 	Datacenter string `json:",omitempty"`
+
+	// Peer is the name of the cluster peer to resolve the service from instead
+	// of the current one (optional).
+	Peer string `json:",omitempty"`
 }
 
 func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
@@ -1240,9 +1254,14 @@ func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
 		Namespace:  r.Namespace,
 		Partition:  r.Partition,
 		Datacenter: r.Datacenter,
+		Peer:       r.Peer,
 	}
 }
 
+func (r *ServiceResolverRedirect) isEmpty() bool {
+	return r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" && r.Peer == ""
+}
+
 // There are some restrictions on what is allowed in here:
 //
 // - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be
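Taken together, these rules let a service-resolver redirect point at a cluster peer, as long as the redirect names a service and does not also set a subset, partition, or datacenter. A minimal valid entry matching the "redirect to peer" test case below (the peer and service names are illustrative):

```go
entry := &structs.ServiceResolverConfigEntry{
	Kind: structs.ServiceResolver,
	Name: "test",
	Redirect: &structs.ServiceResolverRedirect{
		Service: "other",
		Peer:    "cluster-01",
	},
}
err := entry.Validate()
// err is nil here; combinations such as Peer+Datacenter, Peer+ServiceSubset,
// or Peer without Service are rejected by the switch above.
```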
@@ -72,6 +72,28 @@ func TestServiceResolverConfigEntry_OSS(t *testing.T) {
 		},
 		validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
 		},
+		{
+			name: "setting redirect Namespace on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Namespace: "ns1",
+				},
+			},
+			validateErr: `Redirect: Setting Namespace requires Consul Enterprise`,
+		},
+		{
+			name: "setting redirect Partition on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Partition: "ap1",
+				},
+			},
+			validateErr: `Redirect: Setting Partition requires Consul Enterprise`,
+		},
 	}
 
 	// Bulk add a bunch of similar validation cases.
@@ -655,6 +655,41 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 		},
 		validateErr: `Redirect.ServiceSubset "gone" is not a valid subset of "test"`,
 		},
+		{
+			name: "redirect with peer and subset",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Peer:          "cluster-01",
+					ServiceSubset: "gone",
+				},
+			},
+			validateErr: `Redirect.Peer cannot be set with Redirect.ServiceSubset`,
+		},
+		{
+			name: "redirect with peer and datacenter",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Peer:       "cluster-01",
+					Datacenter: "dc2",
+				},
+			},
+			validateErr: `Redirect.Peer cannot be set with Redirect.Datacenter`,
+		},
+		{
+			name: "redirect with peer and datacenter",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Peer: "cluster-01",
+				},
+			},
+			validateErr: `Redirect.Peer defined without Redirect.Service`,
+		},
 		{
 			name: "self redirect with valid subset",
 			entry: &ServiceResolverConfigEntry{
@@ -669,6 +704,17 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 			},
 		},
 		},
+		{
+			name: "redirect to peer",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Service: "other",
+					Peer:    "cluster-01",
+				},
+			},
+		},
 		{
 			name: "simple wildcard failover",
 			entry: &ServiceResolverConfigEntry{
@@ -2756,6 +2756,7 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
 				"passive_health_check": &PassiveHealthCheck{
 					MaxFailures: 13,
 					Interval:    14 * time.Second,
+					EnforcingConsecutive5xx: uintPointer(80),
 				},
 				"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
 			},
@@ -2772,6 +2773,7 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
 				"passive_health_check": &PassiveHealthCheck{
 					MaxFailures: 13,
 					Interval:    14 * time.Second,
+					EnforcingConsecutive5xx: uintPointer(80),
 				},
 				"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
 			},
@@ -2944,6 +2946,28 @@ func TestParseUpstreamConfig(t *testing.T) {
 	}
 }
 
+func TestProxyConfigEntry(t *testing.T) {
+	cases := map[string]configEntryTestcase{
+		"proxy config name provided is not global": {
+			entry: &ProxyConfigEntry{
+				Name: "foo",
+			},
+			normalizeErr: `invalid name ("foo"), only "global" is supported`,
+		},
+		"proxy config has no name": {
+			entry: &ProxyConfigEntry{
+				Name: "",
+			},
+			expected: &ProxyConfigEntry{
+				Name:           ProxyConfigGlobal,
+				Kind:           ProxyDefaults,
+				EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
+			},
+		},
+	}
+	testConfigEntryNormalizeAndValidate(t, cases)
+}
+
 func requireContainsLower(t *testing.T, haystack, needle string) {
 	t.Helper()
 	require.Contains(t, strings.ToLower(haystack), strings.ToLower(needle))
@@ -3046,3 +3070,7 @@ func testConfigEntryNormalizeAndValidate(t *testing.T, cases map[string]configEn
 		})
 	}
 }
+
+func uintPointer(v uint32) *uint32 {
+	return &v
+}
@@ -224,6 +224,10 @@ type IssuedCert struct {
 	// AgentURI is the cert URI value.
 	AgentURI string `json:",omitempty"`
 
+	// ServerURI is the URI value of a cert issued for a server agent.
+	// The same URI is shared by all servers in a Consul datacenter.
+	ServerURI string `json:",omitempty"`
+
 	// Kind is the kind of service for which the cert was issued.
 	Kind ServiceKind `json:",omitempty"`
 	// KindURI is the cert URI value.
@@ -1257,8 +1257,9 @@ type NodeService struct {
 	// a pointer so that we never have to nil-check this.
 	Connect ServiceConnect
 
+	// TODO: rename to reflect that this is used to express future intent to register.
 	// LocallyRegisteredAsSidecar is private as it is only used by a local agent
-	// state to track if the service was registered from a nested sidecar_service
+	// state to track if the service was or will be registered from a nested sidecar_service
 	// block. We need to track that so we can know whether we need to deregister
 	// it automatically too if it's removed from the service definition or if the
 	// parent service is deregistered. Relying only on ID would cause us to
@@ -53,6 +53,28 @@ func TestNodeServiceWithName(t testing.T, name string) *NodeService {
 	}
 }
 
+const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul"
+
+func TestNodeServiceWithNameInPeer(t testing.T, name string, peer string) *NodeService {
+	service := "payments"
+	return &NodeService{
+		Kind:    ServiceKindTypical,
+		Service: name,
+		Port:    8080,
+		Connect: ServiceConnect{
+			PeerMeta: &PeeringServiceMeta{
+				SNI: []string{
+					service + ".default.default." + peer + ".external." + peerTrustDomain,
+				},
+				SpiffeID: []string{
+					"spiffe://" + peerTrustDomain + "/ns/default/dc/" + peer + "-dc/svc/" + service,
+				},
+				Protocol: "tcp",
+			},
+		},
+	}
+}
+
 // TestNodeServiceProxy returns a *NodeService representing a valid
 // Connect proxy.
 func TestNodeServiceProxy(t testing.T) *NodeService {
@@ -26,7 +26,7 @@ func TestUpstreams(t testing.T) Upstreams {
 			Config: map[string]interface{}{
 				// Float because this is how it is decoded by JSON decoder so this
 				// enables the value returned to be compared directly to a decoded JSON
-				// response without spurios type loss.
+				// response without spurious type loss.
 				"connect_timeout_ms": float64(1000),
 			},
 		},
@@ -185,6 +185,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				Address:         node.Address,
 				Datacenter:      node.Datacenter,
 				TaggedAddresses: node.TaggedAddresses,
+				PeerName:        node.PeerName,
 				Meta:            node.Meta,
 				RaftIndex: structs.RaftIndex{
 					ModifyIndex: node.ModifyIndex,
@@ -207,6 +208,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				Service: structs.NodeService{
 					ID:      svc.ID,
 					Service: svc.Service,
+					Kind:    structs.ServiceKind(svc.Kind),
 					Tags:    svc.Tags,
 					Address: svc.Address,
 					Meta:    svc.Meta,
@@ -226,6 +228,39 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 					},
 				},
 			}
+
+			if svc.Proxy != nil {
+				out.Service.Service.Proxy = structs.ConnectProxyConfig{}
+				t := &out.Service.Service.Proxy
+				if svc.Proxy.DestinationServiceName != "" {
+					t.DestinationServiceName = svc.Proxy.DestinationServiceName
+				}
+				if svc.Proxy.DestinationServiceID != "" {
+					t.DestinationServiceID = svc.Proxy.DestinationServiceID
+				}
+				if svc.Proxy.LocalServiceAddress != "" {
+					t.LocalServiceAddress = svc.Proxy.LocalServiceAddress
+				}
+				if svc.Proxy.LocalServicePort != 0 {
+					t.LocalServicePort = svc.Proxy.LocalServicePort
+				}
+				if svc.Proxy.LocalServiceSocketPath != "" {
+					t.LocalServiceSocketPath = svc.Proxy.LocalServiceSocketPath
+				}
+				if svc.Proxy.MeshGateway.Mode != "" {
+					t.MeshGateway.Mode = structs.MeshGatewayMode(svc.Proxy.MeshGateway.Mode)
+				}
+
+				if svc.Proxy.TransparentProxy != nil {
+					if svc.Proxy.TransparentProxy.DialedDirectly {
+						t.TransparentProxy.DialedDirectly = svc.Proxy.TransparentProxy.DialedDirectly
+					}
+
+					if svc.Proxy.TransparentProxy.OutboundListenerPort != 0 {
+						t.TransparentProxy.OutboundListenerPort = svc.Proxy.TransparentProxy.OutboundListenerPort
+					}
+				}
+			}
 			opsRPC = append(opsRPC, out)
 
 		case in.Check != nil:
@@ -265,6 +300,8 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				ServiceID:   check.ServiceID,
 				ServiceName: check.ServiceName,
 				ServiceTags: check.ServiceTags,
+				PeerName:    check.PeerName,
+				ExposedPort: check.ExposedPort,
 				Definition: structs.HealthCheckDefinition{
 					HTTP:          check.Definition.HTTP,
 					TLSServerName: check.Definition.TLSServerName,
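With the conversion above, a transaction can register a connect-proxy service with its `Proxy` block preserved instead of being dropped. A compact sketch of such a request body is shown here; the node name and ports are illustrative, and the full version appears in the test added below:

```go
body := strings.NewReader(`[
  {
    "Service": {
      "Verb": "set",
      "Node": "node1",
      "Service": {
        "Service": "test-sidecar-proxy",
        "Port": 20000,
        "Kind": "connect-proxy",
        "Proxy": {
          "DestinationServiceName": "test",
          "LocalServiceAddress": "127.0.0.1",
          "LocalServicePort": 4444
        }
      }
    }
  }
]`)
req, _ := http.NewRequest("PUT", "/v1/txn", body)
```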
@@ -585,6 +585,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 				"Output": "success",
 				"ServiceID": "",
 				"ServiceName": "",
+				"ExposedPort": 5678,
 				"Definition": {
 					"IntervalDuration": "15s",
 					"TimeoutDuration": "15s",
@@ -600,12 +601,8 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 	req, _ := http.NewRequest("PUT", "/v1/txn", buf)
 	resp := httptest.NewRecorder()
 	obj, err := a.srv.Txn(resp, req)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if resp.Code != 200 {
-		t.Fatalf("expected 200, got %d", resp.Code)
-	}
+	require.NoError(t, err)
+	require.Equal(t, 200, resp.Code, resp.Body)
 
 	txnResp, ok := obj.(structs.TxnResponse)
 	if !ok {
@@ -668,6 +665,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 				Status:  api.HealthPassing,
 				Notes:   "Http based health check",
 				Output:  "success",
+				ExposedPort: 5678,
 				Definition: structs.HealthCheckDefinition{
 					Interval: 15 * time.Second,
 					Timeout:  15 * time.Second,
@@ -686,3 +684,117 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 	}
 	assert.Equal(t, expected, txnResp)
 }
+
+func TestTxnEndpoint_NodeService(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	t.Parallel()
+	a := NewTestAgent(t, "")
+	defer a.Shutdown()
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+
+	// Make sure the fields of a check are handled correctly when both creating and
+	// updating, and test both sets of duration fields to ensure backwards compatibility.
+	buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
+[
+	{
+		"Service": {
+			"Verb": "set",
+			"Node": "%s",
+			"Service": {
+				"Service": "test",
+				"Port": 4444
+			}
+		}
+	},
+	{
+		"Service": {
+			"Verb": "set",
+			"Node": "%s",
+			"Service": {
+				"Service": "test-sidecar-proxy",
+				"Port": 20000,
+				"Kind": "connect-proxy",
+				"Proxy": {
+					"DestinationServiceName": "test",
+					"DestinationServiceID": "test",
+					"LocalServiceAddress": "127.0.0.1",
+					"LocalServicePort": 4444,
+					"upstreams": [
+						{
+							"DestinationName": "fake-backend",
+							"LocalBindPort": 25001
+						}
+					]
+				}
+			}
+		}
+	}
+]
+`, a.config.NodeName, a.config.NodeName)))
+	req, _ := http.NewRequest("PUT", "/v1/txn", buf)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.Txn(resp, req)
+	require.NoError(t, err)
+	require.Equal(t, 200, resp.Code)
+
+	txnResp, ok := obj.(structs.TxnResponse)
+	if !ok {
+		t.Fatalf("bad type: %T", obj)
+	}
+	require.Equal(t, 2, len(txnResp.Results))
+
+	index := txnResp.Results[0].Service.ModifyIndex
+	expected := structs.TxnResponse{
+		Results: structs.TxnResults{
+			&structs.TxnResult{
+				Service: &structs.NodeService{
+					Service: "test",
+					ID:      "test",
+					Port:    4444,
+					Weights: &structs.Weights{
+						Passing: 1,
+						Warning: 1,
+					},
+					RaftIndex: structs.RaftIndex{
+						CreateIndex: index,
+						ModifyIndex: index,
+					},
+					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				},
+			},
+			&structs.TxnResult{
+				Service: &structs.NodeService{
+					Service: "test-sidecar-proxy",
+					ID:      "test-sidecar-proxy",
+					Port:    20000,
+					Kind:    "connect-proxy",
+					Weights: &structs.Weights{
+						Passing: 1,
+						Warning: 1,
+					},
+					Proxy: structs.ConnectProxyConfig{
+						DestinationServiceName: "test",
+						DestinationServiceID:   "test",
+						LocalServiceAddress:    "127.0.0.1",
+						LocalServicePort:       4444,
+					},
+					TaggedAddresses: map[string]structs.ServiceAddress{
+						"consul-virtual": {
+							Address: "240.0.0.1",
+							Port:    20000,
+						},
+					},
+					RaftIndex: structs.RaftIndex{
+						CreateIndex: index,
+						ModifyIndex: index,
+					},
+					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				},
+			},
+		},
+	}
+	assert.Equal(t, expected, txnResp)
+}
@@ -211,7 +211,9 @@ func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) (
 	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
 		return nil, nil
 	}
+	if peer := req.URL.Query().Get("peer"); peer != "" {
+		args.PeerName = peer
+	}
 	if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
 		return nil, err
 	}
@@ -769,6 +771,7 @@ func (s *HTTPHandlers) UIMetricsProxy(resp http.ResponseWriter, req *http.Reques
 		Director: func(r *http.Request) {
 			r.URL = u
 		},
+		Transport: s.proxyTransport,
 		ErrorLog: log.StandardLogger(&hclog.StandardLoggerOptions{
 			InferLevels: true,
 		}),
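Two small UI API changes are bundled here: the internal services listing accepts an optional `peer` query parameter, and the metrics proxy now reuses a shared transport so connections to the metrics backend can be kept alive. A minimal sketch of the former, assuming the internal UI services path and an illustrative peer name:

```go
// Sketch: list services imported from a specific cluster peer via the internal UI API.
req, _ := http.NewRequest("GET", "/v1/internal/ui/services?peer=cluster-01", nil)
```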
@ -88,29 +88,26 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
|
||||||
clusters = append(clusters, passthroughs...)
|
clusters = append(clusters, passthroughs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
|
getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
|
||||||
// so that the sets of endpoints generated matches the sets of clusters.
|
|
||||||
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
|
|
||||||
upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
|
upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
|
||||||
|
|
||||||
explicit := upstream.HasLocalPortOrSocket()
|
explicit := upstream.HasLocalPortOrSocket()
|
||||||
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
|
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
|
||||||
if !implicit && !explicit {
|
return upstream, !implicit && !explicit
|
||||||
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
chainEndpoints, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid]
|
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
|
||||||
if !ok {
|
// so that the sets of endpoints generated matches the sets of clusters.
|
||||||
// this should not happen
|
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
|
||||||
return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
|
upstream, skip := getUpstream(uid)
|
||||||
|
if skip {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
|
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
|
||||||
uid,
|
uid,
|
||||||
upstream,
|
upstream,
|
||||||
chain,
|
chain,
|
||||||
chainEndpoints,
|
|
||||||
cfgSnap,
|
cfgSnap,
|
||||||
false,
|
false,
|
||||||
)
|
)
|
||||||
|
@ -127,18 +124,15 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
|
||||||
// upstream in endpoints.go so that the sets of endpoints generated matches
|
// upstream in endpoints.go so that the sets of endpoints generated matches
|
||||||
// the sets of clusters.
|
// the sets of clusters.
|
||||||
for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
|
for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
|
||||||
upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
|
upstream, skip := getUpstream(uid)
|
||||||
|
if skip {
|
||||||
explicit := upstreamCfg.HasLocalPortOrSocket()
|
|
||||||
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
|
|
||||||
if !implicit && !explicit {
|
|
||||||
// Not associated with a known explicit or implicit upstream so it is skipped.
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
|
peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
|
||||||
|
cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
|
||||||
|
|
||||||
upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap)
|
upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, cfg, peerMeta, cfgSnap)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -652,17 +646,10 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
|
||||||
return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
|
return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
|
||||||
}
|
}
|
||||||
|
|
||||||
chainEndpoints, ok := cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid]
|
|
||||||
if !ok {
|
|
||||||
// this should not happen
|
|
||||||
return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
|
|
||||||
}
|
|
||||||
|
|
||||||
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
|
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
|
||||||
uid,
|
uid,
|
||||||
&u,
|
&u,
|
||||||
chain,
|
chain,
|
||||||
chainEndpoints,
|
|
||||||
cfgSnap,
|
cfgSnap,
|
||||||
false,
|
false,
|
||||||
)
|
)
|
||||||
|
@ -745,7 +732,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam
|
||||||
|
|
||||||
func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
|
func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
|
||||||
uid proxycfg.UpstreamID,
|
uid proxycfg.UpstreamID,
|
||||||
upstream *structs.Upstream,
|
upstreamConfig structs.UpstreamConfig,
|
||||||
peerMeta structs.PeeringServiceMeta,
|
peerMeta structs.PeeringServiceMeta,
|
||||||
cfgSnap *proxycfg.ConfigSnapshot,
|
cfgSnap *proxycfg.ConfigSnapshot,
|
||||||
) (*envoy_cluster_v3.Cluster, error) {
|
) (*envoy_cluster_v3.Cluster, error) {
|
||||||
|
@ -754,16 +741,21 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
|
if upstreamConfig.EnvoyClusterJSON != "" {
|
||||||
if cfg.EnvoyClusterJSON != "" {
|
c, err = makeClusterFromUserConfig(upstreamConfig.EnvoyClusterJSON)
|
||||||
c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return c, err
|
return c, err
|
||||||
}
|
}
|
||||||
// In the happy path don't return yet as we need to inject TLS config still.
|
// In the happy path don't return yet as we need to inject TLS config still.
|
||||||
}
|
}
|
||||||
|
|
||||||
tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
|
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return c, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
|
||||||
if !ok {
|
if !ok {
|
||||||
// this should never happen since we loop through upstreams with
|
// this should never happen since we loop through upstreams with
|
||||||
// set trust bundles
|
// set trust bundles
|
||||||
|
@ -772,22 +764,29 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
|
||||||
|
|
||||||
clusterName := generatePeeredClusterName(uid, tbs)
|
clusterName := generatePeeredClusterName(uid, tbs)
|
||||||
|
|
||||||
|
outlierDetection := ToOutlierDetection(upstreamConfig.PassiveHealthCheck)
|
||||||
|
// We can't rely on health checks for services on cluster peers because they
|
||||||
|
// don't take into account service resolvers, splitters and routers. Setting
|
||||||
|
// MaxEjectionPercent too 100% gives outlier detection the power to eject the
|
||||||
|
// entire cluster.
|
||||||
|
outlierDetection.MaxEjectionPercent = &wrappers.UInt32Value{Value: 100}
|
||||||
|
|
||||||
s.Logger.Trace("generating cluster for", "cluster", clusterName)
|
s.Logger.Trace("generating cluster for", "cluster", clusterName)
|
||||||
if c == nil {
|
if c == nil {
|
||||||
c = &envoy_cluster_v3.Cluster{
|
c = &envoy_cluster_v3.Cluster{
|
||||||
Name: clusterName,
|
Name: clusterName,
|
||||||
ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
|
ConnectTimeout: durationpb.New(time.Duration(upstreamConfig.ConnectTimeoutMs) * time.Millisecond),
|
||||||
CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
|
CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
|
||||||
HealthyPanicThreshold: &envoy_type_v3.Percent{
|
HealthyPanicThreshold: &envoy_type_v3.Percent{
|
||||||
Value: 0, // disable panic threshold
|
Value: 0, // disable panic threshold
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
|
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
|
||||||
Thresholds: makeThresholdsIfNeeded(cfg.Limits),
|
Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
|
||||||
},
|
},
|
||||||
OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
|
OutlierDetection: outlierDetection,
|
||||||
}
|
}
|
||||||
if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
|
if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
|
||||||
if err := s.setHttp2ProtocolOptions(c); err != nil {
|
if err := s.setHttp2ProtocolOptions(c); err != nil {
|
||||||
return c, err
|
return c, err
|
||||||
}
|
}
|
||||||
|
@ -821,12 +820,11 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
|
||||||
false, /*onlyPassing*/
|
false, /*onlyPassing*/
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rootPEMs := cfgSnap.RootPEMs()
|
rootPEMs := cfgSnap.RootPEMs()
|
||||||
if uid.Peer != "" {
|
if uid.Peer != "" {
|
||||||
tbs, _ := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
|
tbs, _ := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
|
||||||
rootPEMs = tbs.ConcatenatedRootPEMs()
|
rootPEMs = tbs.ConcatenatedRootPEMs()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -961,7 +959,6 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
uid proxycfg.UpstreamID,
|
uid proxycfg.UpstreamID,
|
||||||
upstream *structs.Upstream,
|
upstream *structs.Upstream,
|
||||||
chain *structs.CompiledDiscoveryChain,
|
chain *structs.CompiledDiscoveryChain,
|
||||||
chainEndpoints map[string]structs.CheckServiceNodes,
|
|
||||||
cfgSnap *proxycfg.ConfigSnapshot,
|
cfgSnap *proxycfg.ConfigSnapshot,
|
||||||
forMeshGateway bool,
|
forMeshGateway bool,
|
||||||
) ([]*envoy_cluster_v3.Cluster, error) {
|
) ([]*envoy_cluster_v3.Cluster, error) {
|
||||||
|
@ -978,7 +975,15 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
upstreamConfigMap = upstream.Config
|
upstreamConfigMap = upstream.Config
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
|
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
|
||||||
|
|
||||||
|
// Mesh gateways are exempt because upstreamsSnapshot is only used for
|
||||||
|
// cluster peering targets and transative failover/redirects are unsupported.
|
||||||
|
if err != nil && !forMeshGateway {
|
||||||
|
return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Don't hard fail on a config typo, just warn. The parse func returns
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
||||||
// default config if there is an error so it's safe to continue.
|
// default config if there is an error so it's safe to continue.
|
||||||
|
@ -986,13 +991,28 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
"error", err)
|
"error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
finalizeUpstreamConfig := func(cfg structs.UpstreamConfig, connectTimeout time.Duration) structs.UpstreamConfig {
|
||||||
|
if cfg.Protocol == "" {
|
||||||
|
cfg.Protocol = chain.Protocol
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Protocol == "" {
|
||||||
|
cfg.Protocol = "tcp"
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.ConnectTimeoutMs == 0 {
|
||||||
|
cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
var escapeHatchCluster *envoy_cluster_v3.Cluster
|
var escapeHatchCluster *envoy_cluster_v3.Cluster
|
||||||
if !forMeshGateway {
|
if !forMeshGateway {
|
||||||
if cfg.EnvoyClusterJSON != "" {
|
if rawUpstreamConfig.EnvoyClusterJSON != "" {
|
||||||
if chain.Default {
|
if chain.Default {
|
||||||
// If you haven't done anything to setup the discovery chain, then
|
// If you haven't done anything to setup the discovery chain, then
|
||||||
// you can use the envoy_cluster_json escape hatch.
|
// you can use the envoy_cluster_json escape hatch.
|
||||||
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
|
escapeHatchCluster, err = makeClusterFromUserConfig(rawUpstreamConfig.EnvoyClusterJSON)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -1006,14 +1026,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
|
|
||||||
var out []*envoy_cluster_v3.Cluster
|
var out []*envoy_cluster_v3.Cluster
|
||||||
for _, node := range chain.Nodes {
|
for _, node := range chain.Nodes {
|
||||||
if node.Type != structs.DiscoveryGraphNodeTypeResolver {
|
switch {
|
||||||
|
case node == nil:
|
||||||
|
return nil, fmt.Errorf("impossible to process a nil node")
|
||||||
|
case node.Type != structs.DiscoveryGraphNodeTypeResolver:
|
||||||
continue
|
continue
|
||||||
|
case node.Resolver == nil:
|
||||||
|
return nil, fmt.Errorf("impossible to process a non-resolver node")
|
||||||
}
|
}
|
||||||
failover := node.Resolver.Failover
|
failover := node.Resolver.Failover
|
||||||
// These variables are prefixed with primary to avoid shaddowing bugs.
|
// These variables are prefixed with primary to avoid shaddowing bugs.
|
||||||
primaryTargetID := node.Resolver.Target
|
primaryTargetID := node.Resolver.Target
|
||||||
primaryTarget := chain.Targets[primaryTargetID]
|
primaryTarget := chain.Targets[primaryTargetID]
|
||||||
primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
|
primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
|
||||||
|
upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, node.Resolver.ConnectTimeout)
|
||||||
if forMeshGateway {
|
if forMeshGateway {
|
||||||
primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
|
primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
|
||||||
}
|
}
|
||||||
|
@ -1026,22 +1052,38 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
type targetClusterOptions struct {
|
type targetClusterOption struct {
|
||||||
targetID string
|
targetID string
|
||||||
clusterName string
|
clusterName string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Construct the information required to make target clusters. When
|
// Construct the information required to make target clusters. When
|
||||||
// failover is configured, create the aggregate cluster.
|
// failover is configured, create the aggregate cluster.
|
||||||
var targetClustersOptions []targetClusterOptions
|
var targetClustersOptions []targetClusterOption
|
||||||
if failover != nil && !forMeshGateway {
|
if failover != nil && !forMeshGateway {
|
||||||
var failoverClusterNames []string
|
var failoverClusterNames []string
|
||||||
for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
|
for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
|
||||||
target := chain.Targets[tid]
|
target := chain.Targets[tid]
|
||||||
clusterName := CustomizeClusterName(target.Name, chain)
|
clusterName := target.Name
|
||||||
|
targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
|
||||||
|
if targetUID.Peer != "" {
|
||||||
|
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
|
||||||
|
// We can't generate cluster on peers without the trust bundle. The
|
||||||
|
// trust bundle should be ready soon.
|
||||||
|
if !ok {
|
||||||
|
s.Logger.Debug("peer trust bundle not ready for discovery chain target",
|
||||||
|
"peer", targetUID.Peer,
|
||||||
|
"target", tid,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
clusterName = generatePeeredClusterName(targetUID, tbs)
|
||||||
|
}
|
||||||
|
clusterName = CustomizeClusterName(clusterName, chain)
|
||||||
clusterName = failoverClusterNamePrefix + clusterName
|
clusterName = failoverClusterNamePrefix + clusterName
|
||||||
|
|
||||||
targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
|
targetClustersOptions = append(targetClustersOptions, targetClusterOption{
|
||||||
targetID: tid,
|
targetID: tid,
|
||||||
clusterName: clusterName,
|
clusterName: clusterName,
|
||||||
})
|
})
|
||||||
|
@ -1070,7 +1112,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
|
|
||||||
out = append(out, c)
|
out = append(out, c)
|
||||||
} else {
|
} else {
|
||||||
targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
|
targetClustersOptions = append(targetClustersOptions, targetClusterOption{
|
||||||
targetID: primaryTargetID,
|
targetID: primaryTargetID,
|
||||||
clusterName: primaryClusterName,
|
clusterName: primaryClusterName,
|
||||||
})
|
})
|
||||||
|
@ -1089,11 +1131,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
||||||
Datacenter: target.Datacenter,
|
Datacenter: target.Datacenter,
|
||||||
Service: target.Service,
|
Service: target.Service,
|
||||||
}.URI().String()
|
}.URI().String()
|
||||||
if uid.Peer != "" {
|
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
|
||||||
return nil, fmt.Errorf("impossible to get a peer discovery chain")
|
s.Logger.Debug("generating cluster for", "cluster", targetInfo.clusterName)
|
||||||
|
if targetUID.Peer != "" {
|
||||||
|
peerMeta := upstreamsSnapshot.UpstreamPeerMeta(targetUID)
|
||||||
|
upstreamCluster, err := s.makeUpstreamClusterForPeerService(targetUID, upstreamConfig, peerMeta, cfgSnap)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Override the cluster name to include the failover-target~ prefix.
|
||||||
|
upstreamCluster.Name = targetInfo.clusterName
|
||||||
|
out = append(out, upstreamCluster)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
s.Logger.Trace("generating cluster for", "cluster", targetInfo.clusterName)
|
|
||||||
c := &envoy_cluster_v3.Cluster{
|
c := &envoy_cluster_v3.Cluster{
|
||||||
Name: targetInfo.clusterName,
|
Name: targetInfo.clusterName,
|
||||||
AltStatName: targetInfo.clusterName,
|
AltStatName: targetInfo.clusterName,
|
||||||
|
@@ -1114,9 +1165,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
             },
             // TODO(peering): make circuit breakers or outlier detection work?
             CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
-                Thresholds: makeThresholdsIfNeeded(cfg.Limits),
+                Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
             },
-            OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
+            OutlierDetection: ToOutlierDetection(upstreamConfig.PassiveHealthCheck),
         }

         var lb *structs.LoadBalancer
@@ -1127,19 +1178,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
             return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", targetInfo.clusterName, err)
         }

-        var proto string
-        if !forMeshGateway {
-            proto = cfg.Protocol
-        }
-        if proto == "" {
-            proto = chain.Protocol
-        }
-
-        if proto == "" {
-            proto = "tcp"
-        }
-
-        if proto == "http2" || proto == "grpc" {
+        if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
             if err := s.setHttp2ProtocolOptions(c); err != nil {
                 return nil, err
             }
@@ -1148,7 +1187,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
         configureTLS := true
         if forMeshGateway {
             // We only initiate TLS if we're doing an L7 proxy.
-            configureTLS = structs.IsProtocolHTTPLike(proto)
+            configureTLS = structs.IsProtocolHTTPLike(upstreamConfig.Protocol)
         }

         if configureTLS {
@@ -1221,7 +1260,6 @@ func (s *ResourceGenerator) makeExportedUpstreamClustersForMeshGateway(cfgSnap *
             proxycfg.NewUpstreamIDFromServiceName(svc),
             nil,
             chain,
-            nil,
             cfgSnap,
             true,
         )
@@ -169,6 +169,18 @@ func TestClustersFromSnapshot(t *testing.T) {
                 }, nil)
             },
         },
+        {
+            name: "custom-passive-healthcheck",
+            create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+                return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
+                    ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{
+                        "enforcing_consecutive_5xx": float64(80),
+                        "max_failures":              float64(5),
+                        "interval":                  float64(10),
+                    }
+                }, nil)
+            },
+        },
         {
             name: "custom-max-inbound-connections",
             create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -257,6 +269,12 @@ func TestClustersFromSnapshot(t *testing.T) {
                 return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
             },
         },
+        {
+            name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
+            create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+                return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
+            },
+        },
         {
             name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
             create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -495,6 +513,13 @@ func TestClustersFromSnapshot(t *testing.T) {
                     "failover", nil, nil, nil)
             },
         },
+        {
+            name: "ingress-with-chain-and-failover-to-cluster-peer",
+            create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+                return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
+                    "failover-to-cluster-peer", nil, nil, nil)
+            },
+        },
         {
             name: "ingress-with-tcp-chain-failover-through-remote-gateway",
             create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -27,6 +27,12 @@ type ProxyConfig struct {
     // Note: This escape hatch is compatible with the discovery chain.
     PublicListenerJSON string `mapstructure:"envoy_public_listener_json"`

+    // ListenerTracingJSON is a complete override ("escape hatch") for the
+    // listeners tracing configuration.
+    //
+    // Note: This escape hatch is compatible with the discovery chain.
+    ListenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"`
+
     // LocalClusterJSON is a complete override ("escape hatch") for the
     // local application cluster.
     //
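The `ListenerTracingJSON` escape hatch added above carries a raw Envoy tracing configuration that rides in the proxy's opaque config map, alongside overrides such as `envoy_public_listener_json`. The sketch below shows how an operator might populate `envoy_listener_tracing_json`; the Zipkin payload and its field values are illustrative assumptions, not taken from this diff, and any valid Envoy tracing provider config could be substituted.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical value for the envoy_listener_tracing_json escape hatch; the
	// tracer choice and collector settings here are assumptions for illustration.
	tracingJSON := `{
	  "http": {
	    "name": "envoy.tracers.zipkin",
	    "typedConfig": {
	      "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
	      "collector_cluster": "zipkin",
	      "collector_endpoint_version": "HTTP_JSON",
	      "collector_endpoint": "/api/v2/spans"
	    }
	  }
	}`

	// Like the other escape hatches, the override is just a string entry in the
	// proxy config map and is decoded via the mapstructure tag shown above.
	proxyConfig := map[string]interface{}{
		"envoy_listener_tracing_json": tracingJSON,
	}

	// Sanity-check that the supplied override is well-formed JSON.
	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(tracingJSON), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(len(proxyConfig) == 1, parsed["http"] != nil) // true true
}
```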
@@ -168,5 +174,10 @@ func ToOutlierDetection(p *structs.PassiveHealthCheck) *envoy_cluster_v3.Outlier
     if p.MaxFailures != 0 {
         od.Consecutive_5Xx = &wrappers.UInt32Value{Value: p.MaxFailures}
     }
+
+    if p.EnforcingConsecutive5xx != nil {
+        od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: *p.EnforcingConsecutive5xx}
+    }
+
     return od
 }
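The hunk above only copies `EnforcingConsecutive5xx` into Envoy's outlier detection when the operator set it, so Envoy's own default enforcement percentage still applies otherwise. A minimal, self-contained sketch of that behaviour, using hypothetical stand-in types rather than the real `structs.PassiveHealthCheck` and Envoy protos:

```go
package main

import "fmt"

// Stand-ins for structs.PassiveHealthCheck and the Envoy OutlierDetection
// proto; simplified for illustration only.
type passiveHealthCheck struct {
	MaxFailures             uint32
	EnforcingConsecutive5xx *uint32
}

type outlierDetection struct {
	consecutive5xx          *uint32
	enforcingConsecutive5xx *uint32
}

func toOutlierDetection(p *passiveHealthCheck) *outlierDetection {
	od := &outlierDetection{}
	if p == nil {
		return od
	}
	if p.MaxFailures != 0 {
		v := p.MaxFailures
		od.consecutive5xx = &v
	}
	// Mirrors the added branch: only override Envoy's enforcement percentage
	// when it was configured explicitly.
	if p.EnforcingConsecutive5xx != nil {
		v := *p.EnforcingConsecutive5xx
		od.enforcingConsecutive5xx = &v
	}
	return od
}

func main() {
	pct := uint32(80)
	od := toOutlierDetection(&passiveHealthCheck{MaxFailures: 5, EnforcingConsecutive5xx: &pct})
	fmt.Println(*od.consecutive5xx, *od.enforcingConsecutive5xx) // 5 80
}
```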
@@ -50,14 +50,19 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
         cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+
         len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints))

-    // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
-    // so that the sets of endpoints generated matches the sets of clusters.
-    for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+    getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
         upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

         explicit := upstream.HasLocalPortOrSocket()
         implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-        if !implicit && !explicit {
+        return upstream, !implicit && !explicit
+    }
+
+    // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
+    // so that the sets of endpoints generated matches the sets of clusters.
+    for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+        upstream, skip := getUpstream(uid)
+        if skip {
             // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
             continue
         }
@@ -70,6 +75,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
         es, err := s.endpointsFromDiscoveryChain(
             uid,
             chain,
+            cfgSnap,
             cfgSnap.Locality,
             upstreamConfigMap,
             cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
@@ -86,12 +92,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
     // upstream in clusters.go so that the sets of endpoints generated matches
     // the sets of clusters.
     for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
-        upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
-
-        explicit := upstreamCfg.HasLocalPortOrSocket()
-        implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-        if !implicit && !explicit {
-            // Not associated with a known explicit or implicit upstream so it is skipped.
+        _, skip := getUpstream(uid)
+        if skip {
+            // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
             continue
         }

@@ -104,22 +107,14 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.

         clusterName := generatePeeredClusterName(uid, tbs)

-        // Also skip peer instances with a hostname as their address. EDS
-        // cannot resolve hostnames, so we provide them through CDS instead.
-        if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
-            continue
+        loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
+        if err != nil {
+            return nil, err
         }

-        endpoints, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid)
-        if ok {
-            la := makeLoadAssignment(
-                clusterName,
-                []loadAssignmentEndpointGroup{
-                    {Endpoints: endpoints},
-                },
-                proxycfg.GatewayKey{ /*empty so it never matches*/ },
-            )
-            resources = append(resources, la)
+        if loadAssignment != nil {
+            resources = append(resources, loadAssignment)
         }
     }

@@ -375,6 +370,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycf
             es, err := s.endpointsFromDiscoveryChain(
                 uid,
                 cfgSnap.IngressGateway.DiscoveryChain[uid],
+                cfgSnap,
                 proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
                 u.Config,
                 cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
@@ -412,9 +408,38 @@ func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
     }
 }

+func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
+    var la *envoy_endpoint_v3.ClusterLoadAssignment
+
+    upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+    if err != nil {
+        return la, err
+    }
+
+    // Also skip peer instances with a hostname as their address. EDS
+    // cannot resolve hostnames, so we provide them through CDS instead.
+    if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
+        return la, nil
+    }
+
+    endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
+    if !ok {
+        return nil, nil
+    }
+    la = makeLoadAssignment(
+        clusterName,
+        []loadAssignmentEndpointGroup{
+            {Endpoints: endpoints},
+        },
+        proxycfg.GatewayKey{ /*empty so it never matches*/ },
+    )
+    return la, nil
+}
+
 func (s *ResourceGenerator) endpointsFromDiscoveryChain(
     uid proxycfg.UpstreamID,
     chain *structs.CompiledDiscoveryChain,
+    cfgSnap *proxycfg.ConfigSnapshot,
     gatewayKey proxycfg.GatewayKey,
     upstreamConfigMap map[string]interface{},
     upstreamEndpoints map[string]structs.CheckServiceNodes,
@@ -432,6 +457,14 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
         upstreamConfigMap = make(map[string]interface{}) // TODO:needed?
     }

+    upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+    // Mesh gateways are exempt because upstreamsSnapshot is only used for
+    // cluster peering targets and transative failover/redirects are unsupported.
+    if err != nil && !forMeshGateway {
+        return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
+    }
+
     var resources []proto.Message

     var escapeHatchCluster *envoy_cluster_v3.Cluster
@@ -465,8 +498,15 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
         if node.Type != structs.DiscoveryGraphNodeTypeResolver {
             continue
         }
+        primaryTargetID := node.Resolver.Target
         failover := node.Resolver.Failover

+        type targetLoadAssignmentOption struct {
+            targetID    string
+            clusterName string
+        }
+        var targetLoadAssignmentOptions []targetLoadAssignmentOption
+
         var numFailoverTargets int
         if failover != nil {
             numFailoverTargets = len(failover.Targets)
@@ -474,39 +514,37 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
         clusterNamePrefix := ""
         if numFailoverTargets > 0 && !forMeshGateway {
             clusterNamePrefix = failoverClusterNamePrefix
-            for _, failTargetID := range failover.Targets {
-                target := chain.Targets[failTargetID]
-                endpointGroup, valid := makeLoadAssignmentEndpointGroup(
-                    chain.Targets,
-                    upstreamEndpoints,
-                    gatewayEndpoints,
-                    failTargetID,
-                    gatewayKey,
-                    forMeshGateway,
-                )
-                if !valid {
-                    continue // skip the failover target if we're still populating the snapshot
-                }
-
-                clusterName := CustomizeClusterName(target.Name, chain)
+            for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) {
+                target := chain.Targets[targetID]
+                clusterName := target.Name
+                targetUID := proxycfg.NewUpstreamIDFromTargetID(targetID)
+                if targetUID.Peer != "" {
+                    tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
+                    // We can't generate cluster on peers without the trust bundle. The
+                    // trust bundle should be ready soon.
+                    if !ok {
+                        s.Logger.Debug("peer trust bundle not ready for discovery chain target",
+                            "peer", targetUID.Peer,
+                            "target", targetID,
+                        )
+                        continue
+                    }
+
+                    clusterName = generatePeeredClusterName(targetUID, tbs)
+                }
+                clusterName = CustomizeClusterName(clusterName, chain)
                 clusterName = failoverClusterNamePrefix + clusterName
                 if escapeHatchCluster != nil {
                     clusterName = escapeHatchCluster.Name
                 }

-                s.Logger.Debug("generating endpoints for", "cluster", clusterName)
-
-                la := makeLoadAssignment(
-                    clusterName,
-                    []loadAssignmentEndpointGroup{endpointGroup},
-                    gatewayKey,
-                )
-                resources = append(resources, la)
+                targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
+                    targetID:    targetID,
+                    clusterName: clusterName,
+                })
             }
-        }
-
-        targetID := node.Resolver.Target
-
-        target := chain.Targets[targetID]
-        clusterName := CustomizeClusterName(target.Name, chain)
-        clusterName = clusterNamePrefix + clusterName
-        if escapeHatchCluster != nil {
+        } else {
+            target := chain.Targets[primaryTargetID]
+
+            clusterName := CustomizeClusterName(target.Name, chain)
+            clusterName = clusterNamePrefix + clusterName
+            if escapeHatchCluster != nil {
@@ -515,12 +553,31 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
             if forMeshGateway {
                 clusterName = meshGatewayExportedClusterNamePrefix + clusterName
             }
-            s.Logger.Debug("generating endpoints for", "cluster", clusterName)
+            targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
+                targetID:    primaryTargetID,
+                clusterName: clusterName,
+            })
+        }
+
+        for _, targetInfo := range targetLoadAssignmentOptions {
+            s.Logger.Debug("generating endpoints for", "cluster", targetInfo.clusterName)
+            targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
+            if targetUID.Peer != "" {
+                loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetInfo.clusterName, targetUID)
+                if err != nil {
+                    return nil, err
+                }
+                if loadAssignment != nil {
+                    resources = append(resources, loadAssignment)
+                }
+                continue
+            }

             endpointGroup, valid := makeLoadAssignmentEndpointGroup(
                 chain.Targets,
                 upstreamEndpoints,
                 gatewayEndpoints,
-                targetID,
+                targetInfo.targetID,
                 gatewayKey,
                 forMeshGateway,
             )
@@ -529,12 +586,13 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
             }

             la := makeLoadAssignment(
-                clusterName,
+                targetInfo.clusterName,
                 []loadAssignmentEndpointGroup{endpointGroup},
                 gatewayKey,
             )
             resources = append(resources, la)
         }
+    }

     return resources, nil
 }
@@ -586,6 +644,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap
         clusterEndpoints, err := s.endpointsFromDiscoveryChain(
             proxycfg.NewUpstreamIDFromServiceName(svc),
             chain,
+            cfgSnap,
             cfgSnap.Locality,
             nil,
             chainEndpoints,
@@ -640,11 +699,12 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
             healthStatus = endpointGroup.OverrideHealth
         }

+        endpoint := &envoy_endpoint_v3.Endpoint{
+            Address: makeAddress(addr, port),
+        }
         es = append(es, &envoy_endpoint_v3.LbEndpoint{
             HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
-                Endpoint: &envoy_endpoint_v3.Endpoint{
-                    Address: makeAddress(addr, port),
-                },
+                Endpoint: endpoint,
             },
             HealthStatus:        healthStatus,
             LoadBalancingWeight: makeUint32Value(weight),
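Taken together, the endpoints.go hunks above (and the matching clusters.go changes) follow one pattern: the primary target and any failover targets are first gathered into a single slice of options, and one loop then emits a resource per option, branching only on whether the target lives on a cluster peer. A rough, self-contained sketch of that shape, with illustrative stand-in types and values rather than the real Consul APIs:

```go
package main

import "fmt"

// Illustrative stand-in; the real code carries discovery-chain target IDs and
// Envoy cluster names (including "failover-target~"-prefixed variants).
type targetOption struct {
	targetID    string
	clusterName string
	peer        string // non-empty when the target is on a cluster peer
}

func main() {
	primary := targetOption{targetID: "db-primary", clusterName: "failover-target~db-primary"}
	failovers := []targetOption{
		{targetID: "db-peered", clusterName: "failover-target~db-peered", peer: "cluster-01"},
	}

	// Gather the primary and failover targets once...
	options := append([]targetOption{primary}, failovers...)

	// ...then generate per-target resources in a single pass, with one branch
	// for peered targets (whose endpoints come from the peering stream).
	for _, opt := range options {
		if opt.peer != "" {
			fmt.Printf("peer load assignment for %s via %s\n", opt.clusterName, opt.peer)
			continue
		}
		fmt.Printf("local load assignment for %s\n", opt.clusterName)
	}
}
```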
@@ -284,6 +284,12 @@ func TestEndpointsFromSnapshot(t *testing.T) {
                 return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
             },
         },
+        {
+            name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
+            create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+                return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
+            },
+        },
         {
             name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
             create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -396,6 +402,13 @@ func TestEndpointsFromSnapshot(t *testing.T) {
                     "failover", nil, nil, nil)
             },
         },
+        {
+            name: "ingress-with-chain-and-failover-to-cluster-peer",
+            create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+                return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
+                    "failover-to-cluster-peer", nil, nil, nil)
+            },
+        },
         {
             name: "ingress-with-tcp-chain-failover-through-remote-gateway",
             create: func(t testinf.T) *proxycfg.ConfigSnapshot {
Some files were not shown because too many files have changed in this diff.