Merge branch 'main' of github.com:hashicorp/consul into derekm/split-grpc-ports

This commit is contained in:
Derek Menteer 2022-09-06 10:51:04 -05:00
commit b50bc443f3
242 changed files with 7371 additions and 2287 deletions

.changelog/13998.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
connect: expose new tracing configuration on envoy
```

.changelog/14238.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
envoy: adds additional Envoy outlier ejection parameters to passive health check configurations.
```
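For context, a minimal sketch of the new knob as the tests in this commit exercise it (the `uintPointer` helper mirrors the one added below; treating Envoy's `enforcing_consecutive_5xx` as a 0-100 percentage is an assumption):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/structs"
)

// uintPointer mirrors the test helper added in this commit.
func uintPointer(v uint32) *uint32 { return &v }

func main() {
	// Sketch only: the new outlier ejection field alongside the existing
	// passive health check fields, using the values from the tests below.
	phc := structs.PassiveHealthCheck{
		Interval:                10,
		MaxFailures:             2,
		EnforcingConsecutive5xx: uintPointer(60),
	}
	fmt.Printf("%+v\n", phc)
}
```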

.changelog/14290.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bugfix
envoy: validate name before deleting proxy default configurations.
```

.changelog/14343.txt Normal file
View File

@ -0,0 +1,4 @@
```release-note:feature
ui: Use withCredentials for all HTTP API requests
```

.changelog/14364.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bugfix
peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state.
```

.changelog/14373.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
```

.changelog/14378.txt Normal file
View File

@ -0,0 +1,5 @@
```release-note:bug
api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
`QueryFailoverOptions` and marks it as deprecated.
```
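The compatibility shim is a plain type alias; a hedged sketch of the pattern (field set abbreviated for illustration, not the full api definition):

```go
package api

// QueryFailoverOptions is the new name for the prepared-query failover
// options (fields abbreviated here for illustration).
type QueryFailoverOptions struct {
	NearestN    int
	Datacenters []string
}

// QueryDatacenterOptions is kept as an alias so pre-rename consumers
// keep compiling.
//
// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions
```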

.changelog/14396.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:feature
peering: Add support to failover to services running on cluster peers.
```

.changelog/14423.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:feature
cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information.
```

.changelog/14429.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed an issue where intermediate certificates could build up in the root CA because they were never being pruned after expiring.
```

.changelog/14433.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
checks: If set, use proxy address for automatically added sidecar check instead of service address.
```

.changelog/14474.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:feature
http: Add new `get-or-empty` operation to the txn api. Refer to the [API docs](https://www.consul.io/api-docs/txn#kv-operations) for more information.
```
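A minimal sketch of the new verb through the Go api client (assumes a locally running agent; unlike `get`, a missing key yields an empty result instead of failing the transaction):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ops := api.TxnOps{
		{KV: &api.KVTxnOp{Verb: api.KVGetOrEmpty, Key: "foo/not-exists"}},
	}
	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	// The txn succeeds even though the key is absent; the result's Value is nil.
	fmt.Println(ok, resp.Results)
}
```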

.changelog/_2271.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
snapshot agent: **(Enterprise only)** Add support for path-based addressing when using s3 backend.
```

View File

@ -816,7 +816,7 @@ jobs:
# Get go binary from workspace
- attach_workspace:
at: .
# Build the consul-dev image from the already built binary
# Build the consul:local image from the already built binary
- run:
command: |
sudo rm -rf /usr/local/go
@ -887,8 +887,8 @@ jobs:
- attach_workspace:
at: .
- run: *install-gotestsum
# Build the consul-dev image from the already built binary
- run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
# Build the consul:local image from the already built binary
- run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
- run:
name: Envoy Integration Tests
command: |
@ -902,6 +902,7 @@ jobs:
GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
GOTESTSUM_FORMAT: standard-verbose
COMPOSE_INTERACTIVE_NO_CLI: 1
LAMBDA_TESTS_ENABLED: "true"
# tput complains if this isn't set to something.
TERM: ansi
- store_artifacts:

View File

@ -16,7 +16,7 @@ jobs:
backport:
if: github.event.pull_request.merged
runs-on: ubuntu-latest
container: hashicorpdev/backport-assistant:0.2.3
container: hashicorpdev/backport-assistant:0.2.5
steps:
- name: Run Backport Assistant for stable-website
run: |
@ -24,6 +24,7 @@ jobs:
env:
BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
BACKPORT_TARGET_TEMPLATE: "stable-website"
BACKPORT_MERGE_COMMIT: true
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Backport changes to latest release branch
run: |

View File

@ -8,6 +8,8 @@ linters:
- ineffassign
- unparam
- forbidigo
- gomodguard
- depguard
issues:
# Disable the default exclude list so that all excludes are explicitly
@ -75,6 +77,30 @@ linters-settings:
# Exclude godoc examples from forbidigo checks.
# Default: true
exclude_godoc_examples: false
gomodguard:
blocked:
# List of blocked modules.
modules:
# Blocked module.
- github.com/hashicorp/net-rpc-msgpackrpc:
recommendations:
- github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc
- github.com/hashicorp/go-msgpack:
recommendations:
- github.com/hashicorp/consul-net-rpc/go-msgpack
depguard:
list-type: denylist
include-go-root: true
# A list of packages for the list type specified.
# Default: []
packages:
- net/rpc
# A list of packages for the list type specified.
# Specify an error message to output when a denied package is used.
# Default: []
packages-with-error-message:
- net/rpc: 'only use forked copy in github.com/hashicorp/consul-net-rpc/net/rpc'
run:
timeout: 10m
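Together these rules push imports toward the forked RPC packages; a small compiling sketch of the allowed form, with the now-blocked originals left commented:

```go
package main

import (
	// "net/rpc"                                // denied by depguard
	// "github.com/hashicorp/go-msgpack/codec"  // blocked by gomodguard
	msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" // recommended fork
)

// Reference the fork so the import is used; CallWithCodec is the same
// helper the tests in this commit call.
var _ = msgpackrpc.CallWithCodec
```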

View File

@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
GOTAGS ?=
GOPATH=$(shell go env GOPATH)
GOARCH?=$(shell go env GOARCH)
MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)
export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@ -129,7 +130,7 @@ export GOLDFLAGS
# Allow skipping docker build during integration tests in CI since we already
# have a built binary
ENVOY_INTEG_DEPS?=dev-docker
ENVOY_INTEG_DEPS?=docker-envoy-integ
ifdef SKIP_DOCKER_BUILD
ENVOY_INTEG_DEPS=noop
endif
@ -152,7 +153,28 @@ dev-docker: linux
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
# 'consul:local' tag is needed to run the integration tests
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
@docker buildx use default && docker buildx build -t 'consul:local' \
--platform linux/$(GOARCH) \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--load \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
check-remote-dev-image-env:
ifndef REMOTE_DEV_IMAGE
$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
endif
remote-docker: check-remote-dev-image-env
$(MAKE) GOARCH=amd64 linux
$(MAKE) GOARCH=arm64 linux
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
--platform linux/amd64,linux/arm64 \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--push \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
endif
# linux builds a linux binary independent of the source platform
# linux builds a linux binary compatible with the source platform
linux:
@mkdir -p ./pkg/bin/linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
@mkdir -p ./pkg/bin/linux_$(GOARCH)
CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
# dist builds binaries for all platforms and packages them for distribution
dist:
@ -324,8 +346,22 @@ consul-docker: go-build-image
ui-docker: ui-build-image
@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui
# Build image used to run integration tests locally.
docker-envoy-integ:
$(MAKE) GOARCH=amd64 linux
docker build \
--platform linux/amd64 $(NOCACHE) $(QUIET) \
-t 'consul:local' \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
$(CURDIR)/pkg/bin/linux_amd64 \
-f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
# Run integration tests.
# Use GO_TEST_FLAGS to run specific tests:
# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"
# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment.
test-envoy-integ: $(ENVOY_INTEG_DEPS)
@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy
.PHONY: test-compat-integ
test-compat-integ: dev-docker

View File

@ -3786,7 +3786,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
fmt.Println("TCP Check:= ", v)
}
if hasNoCorrectTCPCheck {
t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
t.Fatalf("Did not find the expected TCP Healthcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
}
require.Equal(t, sidecarSvc, gotSidecar)
})

View File

@ -1399,8 +1399,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
Protocol: "http",
MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
PassiveHealthCheck: &structs.PassiveHealthCheck{
Interval: 10,
MaxFailures: 2,
Interval: 10,
MaxFailures: 2,
EnforcingConsecutive5xx: uintPointer(60),
},
},
Overrides: []*structs.UpstreamConfig{
@ -1432,8 +1433,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
Upstream: wildcard,
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"Interval": int64(10),
"MaxFailures": int64(2),
"EnforcingConsecutive5xx": int64(60),
},
"mesh_gateway": map[string]interface{}{
"Mode": "remote",
@ -1445,8 +1447,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
Upstream: mysql,
Config: map[string]interface{}{
"passive_health_check": map[string]interface{}{
"Interval": int64(10),
"MaxFailures": int64(2),
"Interval": int64(10),
"MaxFailures": int64(2),
"EnforcingConsecutive5xx": int64(60),
},
"mesh_gateway": map[string]interface{}{
"Mode": "local",
@ -2507,3 +2510,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) {
})
}
}
func uintPointer(v uint32) *uint32 {
return &v
}

View File

@ -153,64 +153,87 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
// we don't support calling this endpoint for a specific peer
if args.PeerName != "" {
return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
}
// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
var maxIndex uint64
// get a local dump for services
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {
return fmt.Errorf("could not get a service dump for local nodes: %w", err)
}
if index > maxIndex {
maxIndex = index
}
reply.Nodes = nodes
// get a list of all peerings
index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("could not list peers for service dump %w", err)
}
if index > maxIndex {
maxIndex = index
}
for _, p := range listedPeerings {
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
// If PeerName is not empty, we return only the imported services from that peer
if args.PeerName != "" {
// get a local dump for services
index, nodes, err := state.ServiceDump(ws,
args.ServiceKind,
args.UseServiceKind,
// Note we fetch imported services with wildcard namespace because imported services' namespaces
// are in a different locality; regardless of our local namespace, we return all imported services
// of the local partition.
args.EnterpriseMeta.WithWildcardNamespace(),
args.PeerName)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
return fmt.Errorf("could not get a service dump for peer %q: %w", args.PeerName, err)
}
if index > maxIndex {
maxIndex = index
}
reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
}
reply.Index = maxIndex
reply.ImportedNodes = nodes
// Get, store, and filter gateway services
idx, gatewayServices, err := state.DumpGatewayServices(ws)
if err != nil {
return err
}
reply.Gateways = gatewayServices
} else {
// otherwise return both local and all imported services
if idx > maxIndex {
maxIndex = idx
}
reply.Index = maxIndex
// get a local dump for services
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
if err != nil {
return fmt.Errorf("could not get a service dump for local nodes: %w", err)
}
raw, err := filter.Execute(reply.Nodes)
if err != nil {
return fmt.Errorf("could not filter local service dump: %w", err)
if index > maxIndex {
maxIndex = index
}
reply.Nodes = nodes
// get a list of all peerings
index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("could not list peers for service dump %w", err)
}
if index > maxIndex {
maxIndex = index
}
for _, p := range listedPeerings {
// Note we fetch imported services with wildcard namespace because imported services' namespaces
// are in a different locality; regardless of our local namespace, we return all imported services
// of the local partition.
index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, args.EnterpriseMeta.WithWildcardNamespace(), p.Name)
if err != nil {
return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
}
if index > maxIndex {
maxIndex = index
}
reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
}
// Get, store, and filter gateway services
idx, gatewayServices, err := state.DumpGatewayServices(ws)
if err != nil {
return err
}
reply.Gateways = gatewayServices
if idx > maxIndex {
maxIndex = idx
}
reply.Index = maxIndex
raw, err := filter.Execute(reply.Nodes)
if err != nil {
return fmt.Errorf("could not filter local service dump: %w", err)
}
reply.Nodes = raw.(structs.CheckServiceNodes)
}
reply.Nodes = raw.(structs.CheckServiceNodes)
importedRaw, err := filter.Execute(reply.ImportedNodes)
if err != nil {

View File

@ -49,7 +49,7 @@ func kvsPreApply(logger hclog.Logger, srv *Server, authz resolver.Result, op api
return false, err
}
case api.KVGet, api.KVGetTree:
case api.KVGet, api.KVGetTree, api.KVGetOrEmpty:
// Filtering for GETs is done on the output side.
case api.KVCheckSession, api.KVCheckIndex:

View File

@ -1098,11 +1098,36 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error {
return fmt.Errorf("error parsing leaf signing cert: %w", err)
}
if err := pruneExpiredIntermediates(caRoot); err != nil {
return err
}
caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem)
caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId)
return nil
}
// pruneExpiredIntermediates removes expired intermediate certificates
// from the given CARoot.
func pruneExpiredIntermediates(caRoot *structs.CARoot) error {
var newIntermediates []string
now := time.Now()
for _, intermediatePEM := range caRoot.IntermediateCerts {
cert, err := connect.ParseCert(intermediatePEM)
if err != nil {
return fmt.Errorf("error parsing leaf signing cert: %w", err)
}
// Only keep the intermediate cert if it's still valid.
if cert.NotAfter.After(now) {
newIntermediates = append(newIntermediates, intermediatePEM)
}
}
caRoot.IntermediateCerts = newIntermediates
return nil
}
// runRenewIntermediate periodically attempts to renew the intermediate cert.
func (c *CAManager) runRenewIntermediate(ctx context.Context) error {
isPrimary := c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter

View File

@ -435,7 +435,6 @@ func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
errorMsg string
}{
{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
// a cert that is not yet valid is ok, assume it will be valid soon enough
{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},

View File

@ -401,6 +401,18 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)
// Wait for the primary's old intermediate to be pruned after expiring.
oldIntermediate := activeRoot.IntermediateCerts[0]
retry.Run(t, func(r *retry.R) {
store := s1.caManager.delegate.State()
_, storedRoot, err := store.CARootActive(nil)
r.Check(err)
if storedRoot.IntermediateCerts[0] == oldIntermediate {
r.Fatal("old intermediate should be gone")
}
})
}
func patchIntermediateCertRenewInterval(t *testing.T) {
@ -516,6 +528,18 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
require.NoError(t, err)
verifyLeafCert(t, activeRoot, cert.CertPEM)
// Wait for dc2's old intermediate to be pruned after expiring.
oldIntermediate := activeRoot.IntermediateCerts[0]
retry.Run(t, func(r *retry.R) {
store := s2.caManager.delegate.State()
_, storedRoot, err := store.CARootActive(nil)
r.Check(err)
if storedRoot.IntermediateCerts[0] == oldIntermediate {
r.Fatal("old intermediate should be gone")
}
})
}
func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {

View File

@ -112,7 +112,7 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
if status.NeverConnected {
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
} else {
healthy := status.IsHealthy()
healthy := s.peerStreamServer.Tracker.IsHealthy(status)
healthyInt := 0
if healthy {
healthyInt = 1
@ -305,7 +305,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
logger.Trace("establishing stream to peer")
streamStatus, err := s.peerStreamTracker.Register(peer.ID)
streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}

View File

@ -40,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
})
}
func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -137,9 +138,11 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
// Delete the peering to trigger the termination sequence.
deleted := &pbpeering.Peering{
ID: p.Peering.ID,
Name: "my-peer-acceptor",
DeletedAt: structs.TimeToProto(time.Now()),
ID: p.Peering.ID,
Name: "my-peer-acceptor",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: p.Peering.PeerServerAddresses,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
dialer.logger.Trace("deleted peering for my-peer-acceptor")
@ -262,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
deleted := &pbpeering.Peering{
ID: p.Peering.PeerID,
Name: "my-peer-dialer",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}
@ -431,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@ -1165,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID,
Name: peerName,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@ -1216,7 +1222,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
}))
require.Never(t, func() bool {
_, found := s1.peerStreamTracker.StreamStatus(peerID)
_, found := s1.peerStreamServer.StreamStatus(peerID)
return found
}, 7*time.Second, 1*time.Second, "peering should not have been established")
}

View File

@ -370,9 +370,9 @@ type Server struct {
// peerStreamServer is a server used to handle peering streams from external clusters.
peerStreamServer *peerstream.Server
// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
peeringServer *peering.Server
peerStreamTracker *peerstream.Tracker
peeringServer *peering.Server
// embedded struct to hold all the enterprise specific data
EnterpriseServer
@ -728,11 +728,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
})
serverDiscoveryServer.Register(externalGRPCServer)
s.peerStreamTracker = peerstream.NewTracker()
s.peeringBackend = NewPeeringBackend(s)
s.peerStreamServer = peerstream.NewServer(peerstream.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
GetStore: func() peerstream.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.peerstream"),
ACLResolver: s.ACLResolver,
@ -746,7 +744,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
return s.ForwardGRPC(s.grpcConnPool, info, fn)
},
})
s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout)
s.peerStreamServer.Register(externalGRPCServer)
// Initialize internal gRPC server.
@ -795,7 +792,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
p := peering.NewServer(peering.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
Tracker: s.peerStreamServer.Tracker,
Logger: deps.Logger.Named("grpc-api.peering"),
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
// Only forward the request if the dc in the request matches the server's datacenter.

View File

@ -535,6 +535,12 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
if req.Peering.Name == "" {
return errors.New("Missing Peering Name")
}
if req.Peering.State == pbpeering.PeeringState_DELETING && (req.Peering.DeletedAt == nil || structs.IsZeroProtoTime(req.Peering.DeletedAt)) {
return errors.New("Missing deletion time for peering in deleting state")
}
if req.Peering.DeletedAt != nil && !structs.IsZeroProtoTime(req.Peering.DeletedAt) && req.Peering.State != pbpeering.PeeringState_DELETING {
return fmt.Errorf("Unexpected state for peering with deletion time: %s", pbpeering.PeeringStateToAPI(req.Peering.State))
}
// Ensure the name is unique (cannot conflict with another peering with a different ID).
_, existing, err := peeringReadTxn(tx, nil, Query{
@ -546,11 +552,32 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
}
if existing != nil {
if req.Peering.ShouldDial() != existing.ShouldDial() {
return fmt.Errorf("Cannot switch peering dialing mode from %t to %t", existing.ShouldDial(), req.Peering.ShouldDial())
}
if req.Peering.ID != existing.ID {
return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID)
}
// Nothing to do if our peer wants to terminate the peering but the peering is already marked for deletion.
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}
// No-op deletion
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_DELETING {
return nil
}
// No-op termination
if existing.State == pbpeering.PeeringState_TERMINATED && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}
// Prevent modifications to Peering marked for deletion.
if !existing.IsActive() {
// This blocks generating new peering tokens or re-establishing the peering until the peering is done deleting.
if existing.State == pbpeering.PeeringState_DELETING {
return fmt.Errorf("cannot write to peering that is marked for deletion")
}
@ -582,8 +609,8 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
req.Peering.ModifyIndex = idx
}
// Ensure associated secrets are cleaned up when a peering is marked for deletion.
if req.Peering.State == pbpeering.PeeringState_DELETING {
// Ensure associated secrets are cleaned up when a peering is marked for deletion or terminated.
if !req.Peering.IsActive() {
if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil {
return fmt.Errorf("failed to delete peering secrets: %w", err)
}

View File

@ -950,6 +950,7 @@ func TestStore_Peering_Watch(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
})
@ -976,6 +977,7 @@ func TestStore_Peering_Watch(t *testing.T) {
err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{
ID: testBarPeerID,
Name: "bar",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
})
@ -1077,6 +1079,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
@ -1112,16 +1115,22 @@ func TestStore_PeeringWrite(t *testing.T) {
// Each case depends on the previous.
s := NewStateStore(nil)
testTime := time.Now()
type expectations struct {
peering *pbpeering.Peering
secrets *pbpeering.PeeringSecrets
err string
}
type testcase struct {
name string
input *pbpeering.PeeringWriteRequest
expectSecrets *pbpeering.PeeringSecrets
expectErr string
name string
input *pbpeering.PeeringWriteRequest
expect expectations
}
run := func(t *testing.T, tc testcase) {
err := s.PeeringWrite(10, tc.input)
if tc.expectErr != "" {
testutil.RequireErrorContains(t, err, tc.expectErr)
if tc.expect.err != "" {
testutil.RequireErrorContains(t, err, tc.expect.err)
return
}
require.NoError(t, err)
@ -1133,52 +1142,176 @@ func TestStore_PeeringWrite(t *testing.T) {
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.NotNil(t, p)
require.Equal(t, tc.input.Peering.State, p.State)
require.Equal(t, tc.input.Peering.Name, p.Name)
require.Equal(t, tc.expect.peering.State, p.State)
require.Equal(t, tc.expect.peering.Name, p.Name)
require.Equal(t, tc.expect.peering.Meta, p.Meta)
if tc.expect.peering.DeletedAt != nil {
require.Equal(t, tc.expect.peering.DeletedAt, p.DeletedAt)
}
secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID)
require.NoError(t, err)
prototest.AssertDeepEqual(t, tc.expectSecrets, secrets)
prototest.AssertDeepEqual(t, tc.expect.secrets, secrets)
}
tcs := []testcase{
{
name: "create baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_ESTABLISHING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: testBazPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testBazSecretID,
Request: &pbpeering.SecretsWriteRequest_Establish{
Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
ActiveStreamSecret: testBazSecretID,
},
},
},
},
expectSecrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_ESTABLISHING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "cannot change ID for baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "123",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
err: `A peering already exists with the name "baz" and a different ID`,
},
},
{
name: "cannot change dialer status for baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "123",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
// Excluding the peer server addresses leads to baz not being considered a dialer.
// PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
err: "Cannot switch peering dialing mode from true to false",
},
},
{
name: "update baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectSecrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "if no state was included in request it is inherited from existing",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Send undefined state.
// State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Previous failing state is picked up.
State: pbpeering.PeeringState_FAILING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "mark baz as terminated",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "cannot modify peering during no-op termination",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
PeerServerAddresses: []string{"localhost:8502"},
// Attempt to add metadata
Meta: map[string]string{"foo": "bar"},
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
// Meta should be unchanged.
Meta: nil,
},
},
},
@ -1186,42 +1319,104 @@ func TestStore_PeeringWrite(t *testing.T) {
name: "mark baz for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: []string{"localhost:8502"},
DeletedAt: structs.TimeToProto(testTime),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
DeletedAt: structs.TimeToProto(testTime),
},
secrets: nil,
},
},
{
name: "deleting a deleted peering is a no-op",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: []string{"localhost:8502"},
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
// Secrets for baz should have been deleted
expectSecrets: nil,
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Still marked as deleting at the original testTime
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(testTime),
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "terminating a peering marked for deletion is a no-op",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Still marked as deleting
State: pbpeering.PeeringState_DELETING,
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "cannot update peering marked for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
ID: testBazPeerID,
Name: "baz",
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
// Attempt to add metadata
Meta: map[string]string{
"source": "kubernetes",
},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectErr: "cannot write to peering that is marked for deletion",
expect: expectations{
err: "cannot write to peering that is marked for deletion",
},
},
{
name: "cannot create peering marked for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
ID: testFooPeerID,
Name: "foo",
PeerServerAddresses: []string{"localhost:8502"},
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectErr: "cannot create a new peering marked for deletion",
expect: expectations{
err: "cannot create a new peering marked for deletion",
},
},
}
for _, tc := range tcs {
@ -1246,6 +1441,7 @@ func TestStore_PeeringDelete(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@ -1759,6 +1955,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
copied := pbpeering.Peering{
ID: tp.peering.ID,
Name: tp.peering.Name,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied}))
@ -2201,6 +2398,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID1,
Name: "peer1",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))

View File

@ -60,6 +60,13 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx
err = fmt.Errorf("key %q doesn't exist", op.DirEnt.Key)
}
case api.KVGetOrEmpty:
_, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta)
if entry == nil && err == nil {
entry = &op.DirEnt
entry.Value = nil
}
case api.KVGetTree:
var entries structs.DirEntries
_, entries, err = s.kvsListTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta)
@ -95,7 +102,7 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx
// value (we have to clone so we don't modify the entry being used by
// the state store).
if entry != nil {
if op.Verb == api.KVGet {
if op.Verb == api.KVGet || op.Verb == api.KVGetOrEmpty {
result := structs.TxnResult{KV: entry}
return structs.TxnResults{&result}, nil
}

View File

@ -577,6 +577,22 @@ func TestStateStore_Txn_KVS(t *testing.T) {
},
},
},
&structs.TxnOp{
KV: &structs.TxnKVOp{
Verb: api.KVGetOrEmpty,
DirEnt: structs.DirEntry{
Key: "foo/update",
},
},
},
&structs.TxnOp{
KV: &structs.TxnKVOp{
Verb: api.KVGetOrEmpty,
DirEnt: structs.DirEntry{
Key: "foo/not-exists",
},
},
},
&structs.TxnOp{
KV: &structs.TxnKVOp{
Verb: api.KVCheckIndex,
@ -702,6 +718,22 @@ func TestStateStore_Txn_KVS(t *testing.T) {
},
},
},
&structs.TxnResult{
KV: &structs.DirEntry{
Key: "foo/update",
Value: []byte("stale"),
RaftIndex: structs.RaftIndex{
CreateIndex: 5,
ModifyIndex: 5,
},
},
},
&structs.TxnResult{
KV: &structs.DirEntry{
Key: "foo/not-exists",
Value: nil,
},
},
&structs.TxnResult{
KV: &structs.DirEntry{

View File

@ -41,8 +41,8 @@ var Gauges = []prometheus.GaugeDefinition{
Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.",
},
{
Name: []string{"consul", "kv", "entries"},
Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.10.3.",
Name: []string{"consul", "state", "kv_entries"},
Help: "Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.",
},
{
Name: []string{"consul", "state", "connect_instances"},

View File

@ -26,11 +26,12 @@ const (
type Server struct {
Config
Tracker *Tracker
}
type Config struct {
Backend Backend
Tracker *Tracker
GetStore func() StateStore
Logger hclog.Logger
ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)
@ -42,8 +43,8 @@ type Config struct {
// outgoingHeartbeatInterval is how often we send a heartbeat.
outgoingHeartbeatInterval time.Duration
// IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
IncomingHeartbeatTimeout time.Duration
// incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
incomingHeartbeatTimeout time.Duration
}
//go:generate mockery --name ACLResolver --inpackage
@ -53,7 +54,6 @@ type ACLResolver interface {
func NewServer(cfg Config) *Server {
requireNotNil(cfg.Backend, "Backend")
requireNotNil(cfg.Tracker, "Tracker")
requireNotNil(cfg.GetStore, "GetStore")
requireNotNil(cfg.Logger, "Logger")
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
@ -63,11 +63,12 @@ func NewServer(cfg Config) *Server {
if cfg.outgoingHeartbeatInterval == 0 {
cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
}
if cfg.IncomingHeartbeatTimeout == 0 {
cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
if cfg.incomingHeartbeatTimeout == 0 {
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Server{
Config: cfg,
Config: cfg,
Tracker: NewTracker(cfg.incomingHeartbeatTimeout),
}
}

View File

@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// incomingHeartbeatCtx will complete if incoming heartbeats time out.
incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
// value, not the current value.
@ -575,6 +575,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
status.TrackRecvResourceSuccess()
}
// We are replying ACK or NACK depending on whether we successfully processed the response.
if err := streamSend(reply); err != nil {
return fmt.Errorf("failed to send to stream: %v", err)
}
@ -605,7 +606,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// They just can't trace the execution properly for some reason (possibly golang/go#29587).
//nolint:govet
incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
}
case update := <-subCh:
@ -642,7 +643,6 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
if err := streamSend(replResp); err != nil {
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
}
status.TrackSendSuccess()
}
}
}

View File

@ -499,9 +499,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -552,9 +551,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
// Set the initial roots and CA configuration.
_, rootA := writeInitialRootsAndCA(t, store)
@ -572,7 +570,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
})
})
var lastSendAck, lastSendSuccess time.Time
var lastSendAck time.Time
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
ack := &pbpeerstream.ReplicationMessage{
@ -587,16 +585,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
},
}
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC)
lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC)
lastSendAck = it.FutureNow(1)
err := client.Send(ack)
require.NoError(t, err)
expect := Status{
Connected: true,
LastAck: lastSendAck,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
Connected: true,
LastAck: lastSendAck,
}
retry.Run(t, func(r *retry.R) {
@ -624,20 +619,17 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
},
}
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC)
lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC)
lastNack = it.FutureNow(1)
err := client.Send(nack)
require.NoError(t, err)
lastNackMsg = "client peer was unable to apply resource: bad bad not good"
expect := Status{
Connected: true,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
Connected: true,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
}
retry.Run(t, func(r *retry.R) {
@ -707,8 +699,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -770,8 +760,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -805,8 +793,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -839,8 +825,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
ImportedServices: map[string]struct{}{
api.String(): {},
},
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
LastSendSuccess: lastSendSuccess,
}
retry.Run(t, func(r *retry.R) {
@ -1142,9 +1126,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.IncomingHeartbeatTimeout = 5 * time.Millisecond
c.incomingHeartbeatTimeout = 5 * time.Millisecond
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1190,9 +1174,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) {
outgoingHeartbeatInterval := 5 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -1249,9 +1233,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
incomingHeartbeatTimeout := 10 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout
c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@ -2760,7 +2744,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
store: store,
pub: publisher,
},
Tracker: NewTracker(),
GetStore: func() StateStore { return store },
Logger: testutil.Logger(t),
Datacenter: "dc1",

View File

@ -14,20 +14,27 @@ type Tracker struct {
mu sync.RWMutex
streams map[string]*MutableStatus
// heartbeatTimeout is the max duration a connection is allowed to be
// disconnected before the stream health is reported as non-healthy
heartbeatTimeout time.Duration
// timeNow is a shim for testing.
timeNow func() time.Time
heartbeatTimeout time.Duration
}
func NewTracker() *Tracker {
func NewTracker(heartbeatTimeout time.Duration) *Tracker {
if heartbeatTimeout == 0 {
heartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Tracker{
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
heartbeatTimeout: heartbeatTimeout,
}
}
func (t *Tracker) SetClock(clock func() time.Time) {
// setClock is used for debugging purposes only.
func (t *Tracker) setClock(clock func() time.Time) {
if clock == nil {
t.timeNow = time.Now
} else {
@ -35,12 +42,6 @@ func (t *Tracker) SetClock(clock func() time.Time) {
}
}
func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) {
t.mu.Lock()
defer t.mu.Unlock()
t.heartbeatTimeout = heartbeatTimeout
}
// Register a stream for a given peer but do not mark it as connected.
func (t *Tracker) Register(id string) (*MutableStatus, error) {
t.mu.Lock()
@ -52,7 +53,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) {
func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) {
status, ok := t.streams[id]
if !ok {
status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected)
status = newMutableStatus(t.timeNow, initAsConnected)
t.streams[id] = status
return status, true, nil
}
@ -136,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) {
delete(t.streams, id)
}
// IsHealthy calculates the health of a peering status.
// We define a peering as unhealthy if its status has been in the following
// states for longer than the configured incomingHeartbeatTimeout.
// - If it is disconnected
// - If the last received Nack is newer than last received Ack
// - If the last received error is newer than last received success
//
// If none of these conditions apply, we call the peering healthy.
func (t *Tracker) IsHealthy(s Status) bool {
// If stream is in a disconnected state for longer than the configured
// heartbeat timeout, report as unhealthy.
if !s.DisconnectTime.IsZero() &&
t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout {
return false
}
// If last Nack is after last Ack, it means the peer is unable to
// handle our replication message.
if s.LastNack.After(s.LastAck) &&
t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout {
return false
}
// If last recv error is newer than last recv success, we were unable
// to handle the peer's replication message.
if s.LastRecvError.After(s.LastRecvResourceSuccess) &&
t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout {
return false
}
return true
}
type MutableStatus struct {
mu sync.RWMutex
@ -152,8 +186,6 @@ type MutableStatus struct {
// Status contains information about the replication stream to a peer cluster.
// TODO(peering): There's a lot of fields here...
type Status struct {
heartbeatTimeout time.Duration
// Connected is true when there is an open stream for the peer.
Connected bool
@ -182,9 +214,6 @@ type Status struct {
// LastSendErrorMessage tracks the last error message when sending into the stream.
LastSendErrorMessage string
// LastSendSuccess tracks the time of the last success response sent into the stream.
LastSendSuccess time.Time
// LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
LastRecvHeartbeat time.Time
@ -214,40 +243,11 @@ func (s *Status) GetExportedServicesCount() uint64 {
return uint64(len(s.ExportedServices))
}
// IsHealthy is a convenience func that returns true/ false for a peering status.
// We define a peering as unhealthy if its status satisfies one of the following:
// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout
// - If the last sent error is newer than last sent success
// - If the last received error is newer than last received success
// If none of these conditions apply, we call the peering healthy.
func (s *Status) IsHealthy() bool {
if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout {
// 1. If heartbeat hasn't been received for a while - report unhealthy
return false
}
if s.LastSendError.After(s.LastSendSuccess) {
// 2. If last sent error is newer than last sent success - report unhealthy
return false
}
if s.LastRecvError.After(s.LastRecvResourceSuccess) {
// 3. If last recv error is newer than last recv success - report unhealthy
return false
}
return true
}
func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus {
if heartbeatTimeout.Microseconds() == 0 {
heartbeatTimeout = defaultIncomingHeartbeatTimeout
}
func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
return &MutableStatus{
Status: Status{
Connected: connected,
heartbeatTimeout: heartbeatTimeout,
NeverConnected: !connected,
Connected: connected,
NeverConnected: !connected,
},
timeNow: now,
doneCh: make(chan struct{}),
@ -271,12 +271,6 @@ func (s *MutableStatus) TrackSendError(error string) {
s.mu.Unlock()
}
func (s *MutableStatus) TrackSendSuccess() {
s.mu.Lock()
s.LastSendSuccess = s.timeNow().UTC()
s.mu.Unlock()
}
// TrackRecvResourceSuccess tracks receiving a replicated resource.
func (s *MutableStatus) TrackRecvResourceSuccess() {
s.mu.Lock()

View File

@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
@ -14,95 +15,107 @@ const (
aPeerID = "63b60245-c475-426b-b314-4588d210859d"
)
func TestStatus_IsHealthy(t *testing.T) {
func TestTracker_IsHealthy(t *testing.T) {
type testcase struct {
name string
dontConnect bool
modifierFunc func(status *MutableStatus)
expectedVal bool
heartbeatTimeout time.Duration
name string
tracker *Tracker
modifierFunc func(status *MutableStatus)
expectedVal bool
}
tcs := []testcase{
{
name: "never connected, unhealthy",
expectedVal: false,
dontConnect: true,
},
{
name: "no heartbeat, unhealthy",
expectedVal: false,
},
{
name: "heartbeat is not received, unhealthy",
expectedVal: false,
name: "disconnect time within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second)
},
heartbeatTimeout: 1 * time.Second,
},
{
name: "send error before send success",
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now()
status.LastSendSuccess = time.Now()
status.LastSendError = time.Now()
status.DisconnectTime = time.Now()
},
},
{
name: "received error before received success",
name: "disconnect time past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now()
status.LastRecvResourceSuccess = time.Now()
status.LastRecvError = time.Now()
status.DisconnectTime = time.Now().Add(-1 * time.Minute)
},
},
{
name: "receive error before receive success within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "receive error before receive success past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "nack before ack within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "nack before ack past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "healthy",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
// set heartbeat
status.LastRecvHeartbeat = time.Now()
},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
tracker := NewTracker()
if tc.heartbeatTimeout.Microseconds() != 0 {
tracker.SetHeartbeatTimeout(tc.heartbeatTimeout)
tracker := tc.tracker
st, err := tracker.Connected(aPeerID)
require.NoError(t, err)
require.True(t, st.Connected)
if tc.modifierFunc != nil {
tc.modifierFunc(st)
}
if !tc.dontConnect {
st, err := tracker.Connected(aPeerID)
require.NoError(t, err)
require.True(t, st.Connected)
if tc.modifierFunc != nil {
tc.modifierFunc(st)
}
require.Equal(t, tc.expectedVal, st.IsHealthy())
} else {
st, found := tracker.StreamStatus(aPeerID)
require.False(t, found)
require.Equal(t, tc.expectedVal, st.IsHealthy())
}
assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus()))
})
}
}
func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
peerID := "63b60245-c475-426b-b314-4588d210859d"
it := incrementalTime{
@ -120,8 +133,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
require.NoError(t, err)
expect := Status{
Connected: true,
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: true,
}
status, ok := tracker.StreamStatus(peerID)
@ -147,9 +159,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC()
expect := Status{
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: true,
LastAck: lastSuccess,
}
require.Equal(t, expect, status)
})
@ -159,10 +170,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
sequence++
expect := Status{
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: false,
DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
LastAck: lastSuccess,
}
status, ok := tracker.StreamStatus(peerID)
require.True(t, ok)
@ -174,9 +184,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
require.NoError(t, err)
expect := Status{
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
Connected: true,
LastAck: lastSuccess,
// DisconnectTime gets cleared on re-connect.
}
@ -203,7 +212,7 @@ func TestTracker_connectedStreams(t *testing.T) {
}
run := func(t *testing.T, tc testCase) {
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
if tc.setup != nil {
tc.setup(t, tracker)
}

View File

@ -280,16 +280,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
}
snap.Roots = roots
case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
if resp.Bundle != nil {
snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
}
case u.CorrelationID == peeringTrustBundlesWatchID:
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
if !ok {
@ -369,6 +359,17 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
// Clean up data
//
peeredChainTargets := make(map[UpstreamID]struct{})
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
for _, target := range discoChain.Targets {
if target.Peer == "" {
continue
}
uid := NewUpstreamIDFromTargetID(target.ID)
peeredChainTargets[uid] = struct{}{}
}
}
validPeerNames := make(map[string]struct{})
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
@ -383,6 +384,11 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
validPeerNames[uid.Peer] = struct{}{}
return true
}
// Peered upstream came from a discovery chain target
if _, ok := peeredChainTargets[uid]; ok {
validPeerNames[uid.Peer] = struct{}{}
return true
}
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
return true
})
@ -463,8 +469,14 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
continue
}
if _, ok := seenUpstreams[uid]; !ok {
for _, cancelFn := range targets {
for targetID, cancelFn := range targets {
cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
delete(snap.ConnectProxy.WatchedUpstreams, uid)
}

View File

@ -5,7 +5,9 @@ import (
"fmt"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type handlerIngressGateway struct {
@ -66,6 +68,9 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot,
snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener)
snap.IngressGateway.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]()
snap.IngressGateway.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
snap.IngressGateway.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
return snap, nil
}
@ -152,6 +157,12 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
cancelUpstreamFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
cancelFn()

View File

@ -814,6 +814,18 @@ func (s *ConfigSnapshot) MeshConfigTLSOutgoing() *structs.MeshDirectionalTLSConf
return mesh.TLS.Outgoing
}
func (s *ConfigSnapshot) ToConfigSnapshotUpstreams() (*ConfigSnapshotUpstreams, error) {
switch s.Kind {
case structs.ServiceKindConnectProxy:
return &s.ConnectProxy.ConfigSnapshotUpstreams, nil
case structs.ServiceKindIngressGateway:
return &s.IngressGateway.ConfigSnapshotUpstreams, nil
default:
// This is a coherence check and should never fail
return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", s.Kind)
}
}
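// Illustrative use of the helper above (a sketch, not part of this diff): callers
// that previously switched on the snapshot kind can fetch the shared upstream view
// in one call and propagate the error for kinds without upstreams. Here peerName
// is a placeholder variable.
//
//	upstreams, err := snap.ToConfigSnapshotUpstreams()
//	if err != nil {
//		return err
//	}
//	bundle, ok := upstreams.UpstreamPeerTrustBundles.Get(peerName)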
func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.PeeringServiceMeta {
nodes, _ := u.PeerUpstreamEndpoints.Get(uid)
if len(nodes) == 0 {

View File

@ -493,6 +493,11 @@ func TestState_WatchesAndUpdates(t *testing.T) {
Mode: structs.MeshGatewayModeNone,
},
},
structs.Upstream{
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "api-failover-to-peer",
LocalBindPort: 10007,
},
structs.Upstream{
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "api-dc2",
@ -552,6 +557,16 @@ func TestState_WatchesAndUpdates(t *testing.T) {
Mode: structs.MeshGatewayModeNone,
},
}),
fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
Name: "api-failover-to-peer",
EvaluateInDatacenter: "dc1",
EvaluateInNamespace: "default",
EvaluateInPartition: "default",
Datacenter: "dc1",
OverrideMeshGateway: structs.MeshGatewayConfig{
Mode: meshGatewayProxyConfigValue,
},
}),
fmt.Sprintf("discovery-chain:%s-dc2", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
Name: "api-dc2",
EvaluateInDatacenter: "dc1",
@ -639,6 +654,26 @@ func TestState_WatchesAndUpdates(t *testing.T) {
},
Err: nil,
},
{
CorrelationID: fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()),
Result: &structs.DiscoveryChainResponse{
Chain: discoverychain.TestCompileConfigEntries(t, "api-failover-to-peer", "default", "default", "dc1", "trustdomain.consul",
func(req *discoverychain.CompileRequest) {
req.OverrideMeshGateway.Mode = meshGatewayProxyConfigValue
}, &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "api-failover-to-peer",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Targets: []structs.ServiceResolverFailoverTarget{
{Peer: "cluster-01"},
},
},
},
}),
},
Err: nil,
},
},
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
require.True(t, snap.Valid())
@ -646,15 +681,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Equal(t, indexedRoots, snap.Roots)
require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
require.True(t, snap.ConnectProxy.IntentionsSet)
require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
require.True(t, snap.ConnectProxy.MeshConfigSet)
@ -667,6 +705,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true),
fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true),
fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true),
upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()): genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true),
fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc2"),
fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc1"),
},
@ -676,15 +715,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
require.Equal(t, indexedRoots, snap.Roots)
require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
require.True(t, snap.ConnectProxy.IntentionsSet)
require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
},

View File

@ -280,6 +280,31 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes {
}
}
func TestUpstreamNodesPeerCluster01(t testing.T) structs.CheckServiceNodes {
peer := "cluster-01"
service := structs.TestNodeServiceWithNameInPeer(t, "web", peer)
return structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test1",
Node: "test1",
Address: "10.40.1.1",
PeerName: peer,
},
Service: service,
},
structs.CheckServiceNode{
Node: &structs.Node{
ID: "test2",
Node: "test2",
Address: "10.40.1.2",
PeerName: peer,
},
Service: service,
},
}
}
func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
return structs.CheckServiceNodes{
structs.CheckServiceNode{

View File

@ -8,6 +8,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
func setupTestVariationConfigEntriesAndSnapshot(
@ -72,6 +73,24 @@ func setupTestVariationConfigEntriesAndSnapshot(
Nodes: TestGatewayNodesDC2(t),
},
})
case "failover-to-cluster-peer":
events = append(events, UpdateEvent{
CorrelationID: "peer-trust-bundle:cluster-01",
Result: &pbpeering.TrustBundleReadResponse{
Bundle: &pbpeering.PeeringTrustBundle{
PeerName: "peer1",
TrustDomain: "peer1.domain",
ExportedPartition: "peer1ap",
RootPEMs: []string{"peer1-root-1"},
},
},
})
events = append(events, UpdateEvent{
CorrelationID: "upstream-peer:db?peer=cluster-01",
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestUpstreamNodesPeerCluster01(t),
},
})
case "failover-through-double-remote-gateway-triggered":
events = append(events, UpdateEvent{
CorrelationID: "upstream-target:db.default.default.dc1:" + dbUID.String(),
@ -255,6 +274,21 @@ func setupTestVariationDiscoveryChain(
},
},
)
case "failover-to-cluster-peer":
entries = append(entries,
&structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "db",
ConnectTimeout: 33 * time.Second,
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Targets: []structs.ServiceResolverFailoverTarget{
{Peer: "cluster-01"},
},
},
},
},
)
case "failover-through-double-remote-gateway-triggered":
fallthrough
case "failover-through-double-remote-gateway":

View File

@ -9,7 +9,9 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/acl"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type handlerUpstreams struct {
@ -21,9 +23,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
return fmt.Errorf("error filling agent cache: %v", u.Err)
}
upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams()
if err != nil {
return err
}
switch {
@ -98,19 +101,16 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
uid := UpstreamIDFromString(uidString)
s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes)
case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
if resp.Bundle != nil {
upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
}
case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
@ -216,6 +216,23 @@ func removeColonPrefix(s string) (string, string, bool) {
return s[0:idx], s[idx+1:], true
}
func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) {
filteredNodes := hostnameEndpoints(
s.logger,
GatewayKey{ /*empty so it never matches*/ },
nodes,
)
if len(filteredNodes) > 0 {
if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
}
} else {
if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set {
delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
}
}
}
func (s *handlerUpstreams) resetWatchesFromChain(
ctx context.Context,
uid UpstreamID,
@ -255,6 +272,12 @@ func (s *handlerUpstreams) resetWatchesFromChain(
delete(snap.WatchedUpstreams[uid], targetID)
delete(snap.WatchedUpstreamEndpoints[uid], targetID)
cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
var (
@ -274,6 +297,7 @@ func (s *handlerUpstreams) resetWatchesFromChain(
service: target.Service,
filter: target.Subset.Filter,
datacenter: target.Datacenter,
peer: target.Peer,
entMeta: target.GetEnterpriseMetadata(),
}
err := s.watchUpstreamTarget(ctx, snap, opts)
@ -384,6 +408,7 @@ type targetWatchOpts struct {
service string
filter string
datacenter string
peer string
entMeta *acl.EnterpriseMeta
}
@ -397,11 +422,17 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
var finalMeta acl.EnterpriseMeta
finalMeta.Merge(opts.entMeta)
correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String()
uid := opts.upstreamID
correlationID := "upstream-target:" + opts.chainID + ":" + uid.String()
if opts.peer != "" {
uid = NewUpstreamIDFromTargetID(opts.chainID)
correlationID = upstreamPeerWatchIDPrefix + uid.String()
}
ctx, cancel := context.WithCancel(ctx)
err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
PeerName: opts.peer,
Datacenter: opts.datacenter,
QueryOptions: structs.QueryOptions{
Token: s.token,
@ -422,6 +453,31 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
}
snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel
if uid.Peer == "" {
return nil
}
if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
}
// Check whether a watch for this peer exists to avoid duplicates.
if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
peerCtx, cancel := context.WithCancel(ctx)
if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
Request: &pbpeering.TrustBundleReadRequest{
Name: uid.Peer,
Partition: uid.PartitionOrDefault(),
},
QueryOptions: structs.QueryOptions{Token: s.token},
}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
cancel()
return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
}
snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
}
return nil
}
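// The dedup pattern used above for per-peer watches, sketched in isolation
// (assumes a watch.Map keyed by peer name; "peer" is a placeholder variable):
// a watch is initialized at most once per key, and CancelWatch tears it down
// when the last upstream referencing the peer goes away.
//
//	if !snap.UpstreamPeerTrustBundles.IsWatched(peer) {
//		peerCtx, cancel := context.WithCancel(ctx)
//		// ... start the trust-bundle notification against peerCtx ...
//		snap.UpstreamPeerTrustBundles.InitWatch(peer, cancel)
//	}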

View File

@ -726,11 +726,12 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
return nil, err
}
if existing == nil || existing.State == pbpeering.PeeringState_DELETING {
// Return early when the Peering doesn't exist or is already marked for deletion.
// We don't return nil because the pb will fail to marshal.
return &pbpeering.PeeringDeleteResponse{}, nil
}
// We are using a write request because we need to perform a deferred deletion.
// The peering gets marked for deletion by setting the DeletedAt field,
// and a leader routine will handle deleting the peering.

View File

@ -621,38 +621,50 @@ func TestPeeringService_Read_ACLEnforcement(t *testing.T) {
}
func TestPeeringService_Delete(t *testing.T) {
tt := map[string]pbpeering.PeeringState{
"active peering": pbpeering.PeeringState_ACTIVE,
"terminated peering": pbpeering.PeeringState_TERMINATED,
}
for name, overrideState := range tt {
t.Run(name, func(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s := newTestServer(t, nil)
// A pointer is kept for the following peering so that we can modify the object without another PeeringWrite.
p := &pbpeering.Peering{
ID: testUUID(t),
Name: "foo",
PeerCAPems: nil,
PeerServerName: "test",
PeerServerAddresses: []string{"addr1"},
}
err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
require.NoError(t, err)
require.Nil(t, p.DeletedAt)
require.True(t, p.IsActive())
// Overwrite the peering state to simulate deleting from a non-initial state.
p.State = overrideState
client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
require.NoError(r, err)
// Initially the peering will be marked for deletion but eventually the leader
// routine will clean it up.
require.Nil(r, resp)
})
})
}
}
func TestPeeringService_Delete_ACLEnforcement(t *testing.T) {

View File

@ -127,9 +127,20 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
if err != nil {
return nil, nil, "", err
}
// Setup default check if none given
// Setup default check if none given.
if len(checks) < 1 {
checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port)
// The check should use the sidecar's address because it makes a request to the sidecar.
// If the sidecar's address is empty, we fall back to the address of the local service, as set in
// sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address
// (which in most cases it is because it's running as a sidecar in the same network).
// We could instead fall back to the address of the service as set in ns.Address, but this
// keeps using sidecar.Proxy.LocalServiceAddress so as to not change things too much in the
// process of fixing #14433.
checkAddress := sidecar.Address
if checkAddress == "" {
checkAddress = sidecar.Proxy.LocalServiceAddress
}
checks = sidecarDefaultChecks(ns.ID, checkAddress, sidecar.Port)
}
return sidecar, checks, token, nil
@ -202,14 +213,11 @@ func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.Servic
return sidecarPort, nil
}
func sidecarDefaultChecks(serviceID string, address string, port int) []*structs.CheckType {
return []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
// Default to localhost rather than agent/service public IP. The checks
// can always be overridden if a non-loopback IP is needed.
TCP: ipaddr.FormatAddressPort(localServiceAddress, port),
Name: "Connect Sidecar Listening",
TCP: ipaddr.FormatAddressPort(address, port),
Interval: 10 * time.Second,
},
{

View File

@ -215,6 +215,141 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
token: "foo",
wantErr: "reserved for internal use",
},
{
name: "uses proxy address for check",
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{
Address: "123.123.123.123",
Proxy: &structs.ConnectProxyConfig{
LocalServiceAddress: "255.255.255.255",
},
},
},
Address: "255.255.255.255",
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Address: "123.123.123.123",
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "255.255.255.255",
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "123.123.123.123:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
},
{
name: "uses proxy.local_service_address for check if proxy address is empty",
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{
Address: "", // Proxy address empty.
Proxy: &structs.ConnectProxyConfig{
LocalServiceAddress: "1.2.3.4",
},
},
},
Address: "", // Service address empty.
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Address: "",
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "1.2.3.4",
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "1.2.3.4:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
},
{
name: "uses 127.0.0.1 for check if proxy and proxy.local_service_address are empty",
sd: &structs.ServiceDefinition{
ID: "web1",
Name: "web",
Port: 1111,
Connect: &structs.ServiceConnect{
SidecarService: &structs.ServiceDefinition{
Address: "",
Proxy: &structs.ConnectProxyConfig{
LocalServiceAddress: "",
},
},
},
Address: "",
},
token: "foo",
wantNS: &structs.NodeService{
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
Kind: structs.ServiceKindConnectProxy,
ID: "web1-sidecar-proxy",
Service: "web-sidecar-proxy",
Port: 2222,
Address: "",
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web1",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 1111,
},
},
wantChecks: []*structs.CheckType{
{
Name: "Connect Sidecar Listening",
TCP: "127.0.0.1:2222",
Interval: 10 * time.Second,
},
{
Name: "Connect Sidecar Aliasing web1",
AliasService: "web1",
},
},
wantToken: "foo",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@ -3,12 +3,13 @@ package structs
import (
"errors"
"fmt"
"github.com/miekg/dns"
"net"
"strconv"
"strings"
"time"
"github.com/miekg/dns"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/hashstructure"
"github.com/mitchellh/mapstructure"
@ -362,6 +363,13 @@ func (e *ProxyConfigEntry) Normalize() error {
}
e.Kind = ProxyDefaults
// proxy default config only accepts global configs
// this check is replicated in normalize() and validate(),
// since validate is not called by all the endpoints (e.g., delete)
if e.Name != "" && e.Name != ProxyConfigGlobal {
return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal)
}
e.Name = ProxyConfigGlobal
e.EnterpriseMeta.Normalize()
@ -961,6 +969,11 @@ type PassiveHealthCheck struct {
// MaxFailures is the count of consecutive failures that results in a host
// being removed from the pool.
MaxFailures uint32 `json:",omitempty" alias:"max_failures"`
// EnforcingConsecutive5xx is the % chance that a host will actually be ejected
// when an outlier status is detected through consecutive 5xx.
// This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"`
}
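// A hedged example of how the new knob is surfaced through an upstream's opaque
// config map, mirroring the test fixture added in this change (values arrive as
// JSON numbers, hence float64):
//
//	ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{
//		"max_failures":              float64(5),
//		"interval":                  float64(10),
//		"enforcing_consecutive_5xx": float64(80), // eject only 80% of the time
//	}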
func (chk *PassiveHealthCheck) Clone() *PassiveHealthCheck {

View File

@ -2754,8 +2754,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
MaxConcurrentRequests: intPointer(12),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 13,
Interval: 14 * time.Second,
EnforcingConsecutive5xx: uintPointer(80),
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
},
@ -2770,8 +2771,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
MaxConcurrentRequests: intPointer(12),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 13,
Interval: 14 * time.Second,
EnforcingConsecutive5xx: uintPointer(80),
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
},
@ -2944,6 +2946,28 @@ func TestParseUpstreamConfig(t *testing.T) {
}
}
func TestProxyConfigEntry(t *testing.T) {
cases := map[string]configEntryTestcase{
"proxy config name provided is not global": {
entry: &ProxyConfigEntry{
Name: "foo",
},
normalizeErr: `invalid name ("foo"), only "global" is supported`,
},
"proxy config has no name": {
entry: &ProxyConfigEntry{
Name: "",
},
expected: &ProxyConfigEntry{
Name: ProxyConfigGlobal,
Kind: ProxyDefaults,
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
},
},
}
testConfigEntryNormalizeAndValidate(t, cases)
}
func requireContainsLower(t *testing.T, haystack, needle string) {
t.Helper()
require.Contains(t, strings.ToLower(haystack), strings.ToLower(needle))
@ -3046,3 +3070,7 @@ func testConfigEntryNormalizeAndValidate(t *testing.T, cases map[string]configEn
})
}
}
func uintPointer(v uint32) *uint32 {
return &v
}

View File

@ -53,6 +53,28 @@ func TestNodeServiceWithName(t testing.T, name string) *NodeService {
}
}
const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul"
func TestNodeServiceWithNameInPeer(t testing.T, name string, peer string) *NodeService {
service := "payments"
return &NodeService{
Kind: ServiceKindTypical,
Service: name,
Port: 8080,
Connect: ServiceConnect{
PeerMeta: &PeeringServiceMeta{
SNI: []string{
service + ".default.default." + peer + ".external." + peerTrustDomain,
},
SpiffeID: []string{
"spiffe://" + peerTrustDomain + "/ns/default/dc/" + peer + "-dc/svc/" + service,
},
Protocol: "tcp",
},
},
}
}
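// For example, TestNodeServiceWithNameInPeer(t, "web", "cluster-01") returns a
// service named "web" whose imported peer metadata advertises:
//
//	SNI:      payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul
//	SpiffeID: spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments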
// TestNodeServiceProxy returns a *NodeService representing a valid
// Connect proxy.
func TestNodeServiceProxy(t testing.T) *NodeService {

View File

@ -26,7 +26,7 @@ func TestUpstreams(t testing.T) Upstreams {
Config: map[string]interface{}{
// Float because this is how it is decoded by JSON decoder so this
// enables the value returned to be compared directly to a decoded JSON
// response without spurious type loss.
"connect_timeout_ms": float64(1000),
},
},

View File

@ -185,6 +185,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
Address: node.Address,
Datacenter: node.Datacenter,
TaggedAddresses: node.TaggedAddresses,
PeerName: node.PeerName,
Meta: node.Meta,
RaftIndex: structs.RaftIndex{
ModifyIndex: node.ModifyIndex,
@ -207,6 +208,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
Service: structs.NodeService{
ID: svc.ID,
Service: svc.Service,
Kind: structs.ServiceKind(svc.Kind),
Tags: svc.Tags,
Address: svc.Address,
Meta: svc.Meta,
@ -226,6 +228,39 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
},
},
}
if svc.Proxy != nil {
out.Service.Service.Proxy = structs.ConnectProxyConfig{}
t := &out.Service.Service.Proxy
if svc.Proxy.DestinationServiceName != "" {
t.DestinationServiceName = svc.Proxy.DestinationServiceName
}
if svc.Proxy.DestinationServiceID != "" {
t.DestinationServiceID = svc.Proxy.DestinationServiceID
}
if svc.Proxy.LocalServiceAddress != "" {
t.LocalServiceAddress = svc.Proxy.LocalServiceAddress
}
if svc.Proxy.LocalServicePort != 0 {
t.LocalServicePort = svc.Proxy.LocalServicePort
}
if svc.Proxy.LocalServiceSocketPath != "" {
t.LocalServiceSocketPath = svc.Proxy.LocalServiceSocketPath
}
if svc.Proxy.MeshGateway.Mode != "" {
t.MeshGateway.Mode = structs.MeshGatewayMode(svc.Proxy.MeshGateway.Mode)
}
if svc.Proxy.TransparentProxy != nil {
if svc.Proxy.TransparentProxy.DialedDirectly {
t.TransparentProxy.DialedDirectly = svc.Proxy.TransparentProxy.DialedDirectly
}
if svc.Proxy.TransparentProxy.OutboundListenerPort != 0 {
t.TransparentProxy.OutboundListenerPort = svc.Proxy.TransparentProxy.OutboundListenerPort
}
}
}
opsRPC = append(opsRPC, out)
case in.Check != nil:
@ -265,6 +300,8 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
ServiceID: check.ServiceID,
ServiceName: check.ServiceName,
ServiceTags: check.ServiceTags,
PeerName: check.PeerName,
ExposedPort: check.ExposedPort,
Definition: structs.HealthCheckDefinition{
HTTP: check.Definition.HTTP,
TLSServerName: check.Definition.TLSServerName,

View File

@ -585,6 +585,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
"Output": "success",
"ServiceID": "",
"ServiceName": "",
"ExposedPort": 5678,
"Definition": {
"IntervalDuration": "15s",
"TimeoutDuration": "15s",
@ -600,12 +601,8 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
resp := httptest.NewRecorder()
obj, err := a.srv.Txn(resp, req)
require.NoError(t, err)
require.Equal(t, 200, resp.Code, resp.Body)
txnResp, ok := obj.(structs.TxnResponse)
if !ok {
@ -662,12 +659,13 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
},
&structs.TxnResult{
Check: &structs.HealthCheck{
Node: a.config.NodeName,
CheckID: "nodecheck",
Name: "Node http check",
Status: api.HealthPassing,
Notes: "Http based health check",
Output: "success",
ExposedPort: 5678,
Definition: structs.HealthCheckDefinition{
Interval: 15 * time.Second,
Timeout: 15 * time.Second,
@ -686,3 +684,117 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
}
assert.Equal(t, expected, txnResp)
}
func TestTxnEndpoint_NodeService(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
// Make sure the fields of a service and of a connect proxy service are handled
// correctly when registering them through the txn endpoint.
buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
[
{
"Service": {
"Verb": "set",
"Node": "%s",
"Service": {
"Service": "test",
"Port": 4444
}
}
},
{
"Service": {
"Verb": "set",
"Node": "%s",
"Service": {
"Service": "test-sidecar-proxy",
"Port": 20000,
"Kind": "connect-proxy",
"Proxy": {
"DestinationServiceName": "test",
"DestinationServiceID": "test",
"LocalServiceAddress": "127.0.0.1",
"LocalServicePort": 4444,
"upstreams": [
{
"DestinationName": "fake-backend",
"LocalBindPort": 25001
}
]
}
}
}
}
]
`, a.config.NodeName, a.config.NodeName)))
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
resp := httptest.NewRecorder()
obj, err := a.srv.Txn(resp, req)
require.NoError(t, err)
require.Equal(t, 200, resp.Code)
txnResp, ok := obj.(structs.TxnResponse)
if !ok {
t.Fatalf("bad type: %T", obj)
}
require.Equal(t, 2, len(txnResp.Results))
index := txnResp.Results[0].Service.ModifyIndex
expected := structs.TxnResponse{
Results: structs.TxnResults{
&structs.TxnResult{
Service: &structs.NodeService{
Service: "test",
ID: "test",
Port: 4444,
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
&structs.TxnResult{
Service: &structs.NodeService{
Service: "test-sidecar-proxy",
ID: "test-sidecar-proxy",
Port: 20000,
Kind: "connect-proxy",
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "test",
DestinationServiceID: "test",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 4444,
},
TaggedAddresses: map[string]structs.ServiceAddress{
"consul-virtual": {
Address: "240.0.0.1",
Port: 20000,
},
},
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
},
}
assert.Equal(t, expected, txnResp)
}

View File

@ -211,7 +211,9 @@ func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) (
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if peer := req.URL.Query().Get("peer"); peer != "" {
args.PeerName = peer
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}

View File

@ -88,29 +88,26 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
clusters = append(clusters, passthroughs...)
}
getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
explicit := upstream.HasLocalPortOrSocket()
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
return upstream, !implicit && !explicit
}
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
// so that the sets of endpoints generated match the sets of clusters.
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
upstream, skip := getUpstream(uid)
if skip {
continue
}
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
uid,
upstream,
chain,
cfgSnap,
false,
)
@ -127,18 +124,15 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
// upstream in endpoints.go so that the sets of endpoints generated match
// the sets of clusters.
for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
upstream, skip := getUpstream(uid)
if skip {
continue
}
peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, cfg, peerMeta, cfgSnap)
if err != nil {
return nil, err
}
@ -652,17 +646,10 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
}
upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
uid,
&u,
chain,
cfgSnap,
false,
)
@ -745,7 +732,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam
func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
uid proxycfg.UpstreamID,
upstreamConfig structs.UpstreamConfig,
peerMeta structs.PeeringServiceMeta,
cfgSnap *proxycfg.ConfigSnapshot,
) (*envoy_cluster_v3.Cluster, error) {
@ -754,16 +741,21 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
err error
)
if upstreamConfig.EnvoyClusterJSON != "" {
c, err = makeClusterFromUserConfig(upstreamConfig.EnvoyClusterJSON)
if err != nil {
return c, err
}
// In the happy path don't return yet as we need to inject TLS config still.
}
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
if err != nil {
return c, err
}
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
if !ok {
// this should never happen since we loop through upstreams with
// set trust bundles
@ -772,22 +764,29 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
clusterName := generatePeeredClusterName(uid, tbs)
outlierDetection := ToOutlierDetection(upstreamConfig.PassiveHealthCheck)
// We can't rely on health checks for services on cluster peers because they
// don't take into account service resolvers, splitters and routers. Setting
// MaxEjectionPercent to 100% gives outlier detection the power to eject the
// entire cluster.
outlierDetection.MaxEjectionPercent = &wrappers.UInt32Value{Value: 100}
s.Logger.Trace("generating cluster for", "cluster", clusterName)
if c == nil {
c = &envoy_cluster_v3.Cluster{
Name: clusterName,
ConnectTimeout: durationpb.New(time.Duration(upstreamConfig.ConnectTimeoutMs) * time.Millisecond),
CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
HealthyPanicThreshold: &envoy_type_v3.Percent{
Value: 0, // disable panic threshold
},
},
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
},
OutlierDetection: outlierDetection,
}
if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
if err := s.setHttp2ProtocolOptions(c); err != nil {
return c, err
}
@ -821,12 +820,11 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
false, /*onlyPassing*/
)
}
}
rootPEMs := cfgSnap.RootPEMs()
if uid.Peer != "" {
tbs, _ := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
rootPEMs = tbs.ConcatenatedRootPEMs()
}
@ -961,7 +959,6 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
uid proxycfg.UpstreamID,
upstream *structs.Upstream,
chain *structs.CompiledDiscoveryChain,
cfgSnap *proxycfg.ConfigSnapshot,
forMeshGateway bool,
) ([]*envoy_cluster_v3.Cluster, error) {
@ -978,7 +975,15 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
upstreamConfigMap = upstream.Config
}
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
// Mesh gateways are exempt because upstreamsSnapshot is only used for
// cluster peering targets and transitive failover/redirects are unsupported.
if err != nil && !forMeshGateway {
return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
}
rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
@ -986,13 +991,28 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
"error", err)
}
finalizeUpstreamConfig := func(cfg structs.UpstreamConfig, connectTimeout time.Duration) structs.UpstreamConfig {
if cfg.Protocol == "" {
cfg.Protocol = chain.Protocol
}
if cfg.Protocol == "" {
cfg.Protocol = "tcp"
}
if cfg.ConnectTimeoutMs == 0 {
cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
}
return cfg
}
var escapeHatchCluster *envoy_cluster_v3.Cluster
if !forMeshGateway {
if cfg.EnvoyClusterJSON != "" {
if rawUpstreamConfig.EnvoyClusterJSON != "" {
if chain.Default {
// If you haven't done anything to setup the discovery chain, then
// you can use the envoy_cluster_json escape hatch.
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
escapeHatchCluster, err = makeClusterFromUserConfig(rawUpstreamConfig.EnvoyClusterJSON)
if err != nil {
return nil, err
}
@ -1006,14 +1026,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
var out []*envoy_cluster_v3.Cluster
for _, node := range chain.Nodes {
switch {
case node == nil:
return nil, fmt.Errorf("impossible to process a nil node")
case node.Type != structs.DiscoveryGraphNodeTypeResolver:
continue
case node.Resolver == nil:
return nil, fmt.Errorf("impossible to process a non-resolver node")
}
failover := node.Resolver.Failover
// These variables are prefixed with primary to avoid shadowing bugs.
primaryTargetID := node.Resolver.Target
primaryTarget := chain.Targets[primaryTargetID]
primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, node.Resolver.ConnectTimeout)
if forMeshGateway {
primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
}
@ -1026,22 +1052,38 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
continue
}
type targetClusterOption struct {
targetID string
clusterName string
}
// Construct the information required to make target clusters. When
// failover is configured, create the aggregate cluster.
var targetClustersOptions []targetClusterOption
if failover != nil && !forMeshGateway {
var failoverClusterNames []string
for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
target := chain.Targets[tid]
clusterName := target.Name
targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
if targetUID.Peer != "" {
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
// We can't generate a cluster for peers without the trust bundle. The
// trust bundle should be ready soon.
if !ok {
s.Logger.Debug("peer trust bundle not ready for discovery chain target",
"peer", targetUID.Peer,
"target", tid,
)
continue
}
clusterName = generatePeeredClusterName(targetUID, tbs)
}
clusterName = CustomizeClusterName(clusterName, chain)
clusterName = failoverClusterNamePrefix + clusterName
targetClustersOptions = append(targetClustersOptions, targetClusterOption{
targetID: tid,
clusterName: clusterName,
})
@ -1070,7 +1112,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
out = append(out, c)
} else {
targetClustersOptions = append(targetClustersOptions, targetClusterOption{
targetID: primaryTargetID,
clusterName: primaryClusterName,
})
@ -1089,11 +1131,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
Datacenter: target.Datacenter,
Service: target.Service,
}.URI().String()
if uid.Peer != "" {
return nil, fmt.Errorf("impossible to get a peer discovery chain")
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
s.Logger.Debug("generating cluster for", "cluster", targetInfo.clusterName)
if targetUID.Peer != "" {
peerMeta := upstreamsSnapshot.UpstreamPeerMeta(targetUID)
upstreamCluster, err := s.makeUpstreamClusterForPeerService(targetUID, upstreamConfig, peerMeta, cfgSnap)
if err != nil {
continue
}
// Override the cluster name to include the failover-target~ prefix.
upstreamCluster.Name = targetInfo.clusterName
out = append(out, upstreamCluster)
continue
}
s.Logger.Trace("generating cluster for", "cluster", targetInfo.clusterName)
c := &envoy_cluster_v3.Cluster{
Name: targetInfo.clusterName,
AltStatName: targetInfo.clusterName,
@ -1114,9 +1165,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
},
// TODO(peering): make circuit breakers or outlier detection work?
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
},
OutlierDetection: ToOutlierDetection(upstreamConfig.PassiveHealthCheck),
}
var lb *structs.LoadBalancer
@ -1127,19 +1178,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", targetInfo.clusterName, err)
}
if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
if err := s.setHttp2ProtocolOptions(c); err != nil {
return nil, err
}
@ -1148,7 +1187,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
configureTLS := true
if forMeshGateway {
// We only initiate TLS if we're doing an L7 proxy.
configureTLS = structs.IsProtocolHTTPLike(proto)
configureTLS = structs.IsProtocolHTTPLike(upstreamConfig.Protocol)
}
if configureTLS {
@ -1221,7 +1260,6 @@ func (s *ResourceGenerator) makeExportedUpstreamClustersForMeshGateway(cfgSnap *
proxycfg.NewUpstreamIDFromServiceName(svc),
nil,
chain,
cfgSnap,
true,
)

View File

@ -169,6 +169,18 @@ func TestClustersFromSnapshot(t *testing.T) {
}, nil)
},
},
{
name: "custom-passive-healthcheck",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{
"enforcing_consecutive_5xx": float64(80),
"max_failures": float64(5),
"interval": float64(10),
}
}, nil)
},
},
{
name: "custom-max-inbound-connections",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@ -257,6 +269,12 @@ func TestClustersFromSnapshot(t *testing.T) {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
},
},
{
name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@ -495,6 +513,13 @@ func TestClustersFromSnapshot(t *testing.T) {
"failover", nil, nil, nil)
},
},
{
name: "ingress-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-to-cluster-peer", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {

View File

@ -27,6 +27,12 @@ type ProxyConfig struct {
// Note: This escape hatch is compatible with the discovery chain.
PublicListenerJSON string `mapstructure:"envoy_public_listener_json"`
// ListenerTracingJSON is a complete override ("escape hatch") for the
// listeners tracing configuration.
//
// Note: This escape hatch is compatible with the discovery chain.
ListenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"`
// LocalClusterJSON is a complete override ("escape hatch") for the
// local application cluster.
//
@ -168,5 +174,10 @@ func ToOutlierDetection(p *structs.PassiveHealthCheck) *envoy_cluster_v3.Outlier
if p.MaxFailures != 0 {
od.Consecutive_5Xx = &wrappers.UInt32Value{Value: p.MaxFailures}
}
if p.EnforcingConsecutive5xx != nil {
od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: *p.EnforcingConsecutive5xx}
}
return od
}
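// A worked example of the mapping above, restricted to the fields handled in this
// hunk: with MaxFailures=5 and *EnforcingConsecutive5xx=80, the returned outlier
// detection carries
//
//	od.Consecutive_5Xx          = &wrappers.UInt32Value{Value: 5}
//	od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: 80}
//
// so five consecutive 5xx responses mark a host an outlier, and ejection is then
// enforced 80% of the time.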

View File

@ -50,14 +50,19 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+
len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints))
getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
explicit := upstream.HasLocalPortOrSocket()
implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
return upstream, !implicit && !explicit
}
// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
// so that the sets of endpoints generated match the sets of clusters.
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
upstream, skip := getUpstream(uid)
if skip {
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
continue
}
@ -70,6 +75,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
es, err := s.endpointsFromDiscoveryChain(
uid,
chain,
cfgSnap,
cfgSnap.Locality,
upstreamConfigMap,
cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
@ -86,12 +92,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
// upstream in clusters.go so that the sets of endpoints generated match
// the sets of clusters.
for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
_, skip := getUpstream(uid)
if skip {
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
continue
}
@ -104,22 +107,14 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
clusterName := generatePeeredClusterName(uid, tbs)
loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
if err != nil {
return nil, err
}
if loadAssignment != nil {
resources = append(resources, loadAssignment)
}
}
@ -375,6 +370,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycf
es, err := s.endpointsFromDiscoveryChain(
uid,
cfgSnap.IngressGateway.DiscoveryChain[uid],
cfgSnap,
proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
u.Config,
cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
@ -412,9 +408,38 @@ func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
}
}
func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
var la *envoy_endpoint_v3.ClusterLoadAssignment
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
if err != nil {
return la, err
}
// Also skip peer instances with a hostname as their address. EDS
// cannot resolve hostnames, so we provide them through CDS instead.
if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
return la, nil
}
endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
if !ok {
return nil, nil
}
la = makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
proxycfg.GatewayKey{ /*empty so it never matches*/ },
)
return la, nil
}
func (s *ResourceGenerator) endpointsFromDiscoveryChain(
uid proxycfg.UpstreamID,
chain *structs.CompiledDiscoveryChain,
cfgSnap *proxycfg.ConfigSnapshot,
gatewayKey proxycfg.GatewayKey,
upstreamConfigMap map[string]interface{},
upstreamEndpoints map[string]structs.CheckServiceNodes,
@ -432,6 +457,14 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
upstreamConfigMap = make(map[string]interface{}) // TODO:needed?
}
upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
// Mesh gateways are exempt because upstreamsSnapshot is only used for
// cluster peering targets, and transitive failover/redirects are unsupported.
if err != nil && !forMeshGateway {
return nil, fmt.Errorf("no upstream snapshot for gateway mode %q", cfgSnap.Kind)
}
var resources []proto.Message
var escapeHatchCluster *envoy_cluster_v3.Cluster
@ -465,8 +498,15 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
if node.Type != structs.DiscoveryGraphNodeTypeResolver {
continue
}
primaryTargetID := node.Resolver.Target
failover := node.Resolver.Failover
type targetLoadAssignmentOption struct {
targetID string
clusterName string
}
var targetLoadAssignmentOptions []targetLoadAssignmentOption
var numFailoverTargets int
if failover != nil {
numFailoverTargets = len(failover.Targets)
@ -474,66 +514,84 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
clusterNamePrefix := ""
if numFailoverTargets > 0 && !forMeshGateway {
clusterNamePrefix = failoverClusterNamePrefix
for _, failTargetID := range failover.Targets {
target := chain.Targets[failTargetID]
endpointGroup, valid := makeLoadAssignmentEndpointGroup(
chain.Targets,
upstreamEndpoints,
gatewayEndpoints,
failTargetID,
gatewayKey,
forMeshGateway,
)
if !valid {
continue // skip the failover target if we're still populating the snapshot
}
for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) {
target := chain.Targets[targetID]
clusterName := target.Name
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
// We can't generate a cluster for peers without the trust bundle. The
// trust bundle should be ready soon.
if !ok {
s.Logger.Debug("peer trust bundle not ready for discovery chain target",
"peer", targetUID.Peer,
"target", targetID,
)
continue
}
clusterName = generatePeeredClusterName(targetUID, tbs)
}
clusterName = CustomizeClusterName(clusterName, chain)
clusterName = failoverClusterNamePrefix + clusterName
if escapeHatchCluster != nil {
clusterName = escapeHatchCluster.Name
}
s.Logger.Debug("generating endpoints for", "cluster", clusterName)
la := makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
resources = append(resources, la)
targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
targetID: targetID,
clusterName: clusterName,
})
}
}
targetID := node.Resolver.Target
target := chain.Targets[targetID]
clusterName := CustomizeClusterName(target.Name, chain)
clusterName = clusterNamePrefix + clusterName
if escapeHatchCluster != nil {
clusterName = escapeHatchCluster.Name
}
if forMeshGateway {
clusterName = meshGatewayExportedClusterNamePrefix + clusterName
}
s.Logger.Debug("generating endpoints for", "cluster", clusterName)
endpointGroup, valid := makeLoadAssignmentEndpointGroup(
chain.Targets,
upstreamEndpoints,
gatewayEndpoints,
targetID,
gatewayKey,
forMeshGateway,
)
if !valid {
continue // skip the cluster if we're still populating the snapshot
} else {
target := chain.Targets[primaryTargetID]
clusterName := CustomizeClusterName(target.Name, chain)
clusterName = clusterNamePrefix + clusterName
if escapeHatchCluster != nil {
clusterName = escapeHatchCluster.Name
}
if forMeshGateway {
clusterName = meshGatewayExportedClusterNamePrefix + clusterName
}
targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
targetID: primaryTargetID,
clusterName: clusterName,
})
}
la := makeLoadAssignment(
clusterName,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
resources = append(resources, la)
for _, targetInfo := range targetLoadAssignmentOptions {
s.Logger.Debug("generating endpoints for", "cluster", targetInfo.clusterName)
targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
if targetUID.Peer != "" {
loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetInfo.clusterName, targetUID)
if err != nil {
return nil, err
}
if loadAssignment != nil {
resources = append(resources, loadAssignment)
}
continue
}
endpointGroup, valid := makeLoadAssignmentEndpointGroup(
chain.Targets,
upstreamEndpoints,
gatewayEndpoints,
targetInfo.targetID,
gatewayKey,
forMeshGateway,
)
if !valid {
continue // skip the cluster if we're still populating the snapshot
}
la := makeLoadAssignment(
targetInfo.clusterName,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
resources = append(resources, la)
}
}
return resources, nil
@ -586,6 +644,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap
clusterEndpoints, err := s.endpointsFromDiscoveryChain(
proxycfg.NewUpstreamIDFromServiceName(svc),
chain,
cfgSnap,
cfgSnap.Locality,
nil,
chainEndpoints,
@ -640,11 +699,12 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
healthStatus = endpointGroup.OverrideHealth
}
endpoint := &envoy_endpoint_v3.Endpoint{
Address: makeAddress(addr, port),
}
es = append(es, &envoy_endpoint_v3.LbEndpoint{
HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
Endpoint: endpoint,
},
HealthStatus: healthStatus,
LoadBalancingWeight: makeUint32Value(weight),

View File

@ -284,6 +284,12 @@ func TestEndpointsFromSnapshot(t *testing.T) {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
},
},
{
name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@ -396,6 +402,13 @@ func TestEndpointsFromSnapshot(t *testing.T) {
"failover", nil, nil, nil)
},
},
{
name: "ingress-with-chain-and-failover-to-cluster-peer",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-to-cluster-peer", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {

View File

@ -3,7 +3,6 @@ package xds
import (
"errors"
"fmt"
"net"
"net/url"
"regexp"
@ -12,6 +11,8 @@ import (
"strings"
"time"
envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
@ -107,6 +108,19 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
}
}
proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
if err != nil {
// Don't hard-fail on a config typo; just warn. The parse func returns
// default config if there is an error, so it's safe to continue.
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
}
var tracing *envoy_http_v3.HttpConnectionManager_Tracing
if proxyCfg.ListenerTracingJSON != "" {
if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil {
s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
}
}
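The tracing JSON is read from the proxy's opaque config; the new test fixture further below exercises it through the `envoy_listener_tracing_json` key. A minimal sketch of a registration that feeds this path (the service definition and both variable names are illustrative):

```go
// Sketch: opaque proxy config carrying the tracing JSON. Only the two keys
// below matter to this change; ns and tracingJSON are assumed to exist.
ns.Proxy.Config = map[string]interface{}{
	"protocol":                    "http",
	"envoy_listener_tracing_json": tracingJSON, // @type-tagged HttpConnectionManager.Tracing JSON
}
```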
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
@ -153,6 +167,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: filterName,
protocol: cfg.Protocol,
useRDS: useRDS,
tracing: tracing,
})
if err != nil {
return nil, err
@ -178,6 +193,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: filterName,
protocol: cfg.Protocol,
useRDS: useRDS,
tracing: tracing,
})
if err != nil {
return nil, err
@ -249,6 +265,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: routeName,
protocol: svcConfig.Protocol,
useRDS: true,
tracing: tracing,
})
if err != nil {
return err
@ -265,6 +282,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
clusterName: clusterName,
filterName: clusterName,
protocol: svcConfig.Protocol,
tracing: tracing,
})
if err != nil {
return err
@ -376,6 +394,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
protocol: cfg.Protocol,
useRDS: false,
statPrefix: "upstream_peered.",
tracing: tracing,
})
if err != nil {
return nil, err
@ -533,6 +552,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: uid.EnvoyID(),
routeName: uid.EnvoyID(),
protocol: cfg.Protocol,
tracing: tracing,
})
if err != nil {
return nil, err
@ -1188,12 +1208,20 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND)
var tracing *envoy_http_v3.HttpConnectionManager_Tracing
if cfg.ListenerTracingJSON != "" {
if tracing, err = makeTracingFromUserConfig(cfg.ListenerTracingJSON); err != nil {
s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
}
}
filterOpts := listenerFilterOpts{
protocol: cfg.Protocol,
filterName: name,
routeName: name,
cluster: LocalAppClusterName,
requestTimeoutMs: cfg.LocalRequestTimeoutMs,
tracing: tracing,
}
if useHTTPFilter {
filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter(
@ -1310,6 +1338,7 @@ func (s *ResourceGenerator) makeExposedCheckListener(cfgSnap *proxycfg.ConfigSna
statPrefix: "",
routePath: path.Path,
httpAuthzFilter: nil,
// in the exposed check listener we don't set the tracing configuration
}
f, err := makeListenerFilter(opts)
if err != nil {
@ -1542,6 +1571,19 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
filterChain.Filters = append(filterChain.Filters, authFilter)
}
proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
if err != nil {
// Don't hard-fail on a config typo; just warn. The parse func returns
// default config if there is an error, so it's safe to continue.
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
}
var tracing *envoy_http_v3.HttpConnectionManager_Tracing
if proxyCfg.ListenerTracingJSON != "" {
if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil {
s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
}
}
// Lastly we setup the actual proxying component. For L4 this is a straight
// tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an
// HTTP filter to do intention checks here instead.
@ -1552,6 +1594,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
cluster: tgtwyOpts.cluster,
statPrefix: "upstream.",
routePath: "",
tracing: tracing,
}
if useHTTPFilter {
@ -1798,6 +1841,7 @@ type filterChainOpts struct {
statPrefix string
forwardClientDetails bool
forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
tracing *envoy_http_v3.HttpConnectionManager_Tracing
}
func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) {
@ -1813,6 +1857,7 @@ func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envo
statPrefix: opts.statPrefix,
forwardClientDetails: opts.forwardClientDetails,
forwardClientPolicy: opts.forwardClientPolicy,
tracing: opts.tracing,
})
if err != nil {
return nil, err
@ -1955,6 +2000,7 @@ type listenerFilterOpts struct {
httpAuthzFilter *envoy_http_v3.HttpFilter
forwardClientDetails bool
forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
tracing *envoy_http_v3.HttpConnectionManager_Tracing
}
func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
@ -2014,6 +2060,19 @@ func makeStatPrefix(prefix, filterName string) string {
return fmt.Sprintf("%s%s", prefix, strings.Replace(filterName, ":", "_", -1))
}
func makeTracingFromUserConfig(configJSON string) (*envoy_http_v3.HttpConnectionManager_Tracing, error) {
// The @type field is present, so decode it first as an any.Any.
var any any.Any
if err := jsonpb.UnmarshalString(configJSON, &any); err != nil {
return nil, err
}
var t envoy_http_v3.HttpConnectionManager_Tracing
if err := proto.Unmarshal(any.Value, &t); err != nil {
return nil, err
}
return &t, nil
}
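Because the blob is first decoded as a protobuf Any, the JSON must carry the HttpConnectionManager.Tracing @type header. A minimal sketch of a valid input, assuming an illustrative 10% sampling rate:

```go
// Sketch: jsonpb resolves the @type to parse the body, then the inner bytes
// are unmarshaled into HttpConnectionManager_Tracing as above.
tracing, err := makeTracingFromUserConfig(`{
	"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
	"random_sampling": {"value": 10}
}`)
if err != nil {
	// malformed JSON or an unresolvable @type fails here
}
_ = tracing
```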
func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
router, err := makeEnvoyHTTPFilter("envoy.filters.http.router", &envoy_http_router_v3.Router{})
if err != nil {
@ -2034,6 +2093,10 @@ func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error)
},
}
if opts.tracing != nil {
cfg.Tracing = opts.tracing
}
if opts.useRDS {
if opts.cluster != "" {
return nil, fmt.Errorf("cannot specify cluster name when using RDS")

View File

@ -772,6 +772,15 @@ func TestListenersFromSnapshot(t *testing.T) {
name: "transparent-proxy-terminating-gateway",
create: proxycfg.TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly,
},
{
name: "custom-trace-listener",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Config["protocol"] = "http"
ns.Proxy.Config["envoy_listener_tracing_json"] = customTraceJSON(t)
}, nil)
},
},
}
latestEnvoyVersion := proxysupport.EnvoyVersions[0]
@ -947,6 +956,40 @@ func customHTTPListenerJSON(t testinf.T, opts customHTTPListenerJSONOptions) str
return buf.String()
}
func customTraceJSON(t testinf.T) string {
t.Helper()
return `
{
"@type" : "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
"provider" : {
"name" : "envoy.tracers.zipkin",
"typed_config" : {
"@type" : "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
"collector_cluster" : "otelcolector",
"collector_endpoint" : "/api/v2/spans",
"collector_endpoint_version" : "HTTP_JSON",
"shared_span_context" : false
}
},
"custom_tags" : [
{
"tag" : "custom_header",
"request_header" : {
"name" : "x-custom-traceid",
"default_value" : ""
}
},
{
"tag" : "alloc_id",
"environment" : {
"name" : "NOMAD_ALLOC_ID"
}
}
]
}
`
}
type configFetcherFunc func() string
var _ ConfigFetcher = (configFetcherFunc)(nil)

View File

@ -0,0 +1,219 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"clusterType": {
"name": "envoy.clusters.aggregate",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig",
"clusters": [
"failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"failover-target~db.default.cluster-01.external.peer1.domain"
]
}
},
"connectTimeout": "33s",
"lbPolicy": "CLUSTER_PROVIDED"
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.cluster-01.external.peer1.domain",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "1s",
"circuitBreakers": {
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "peer1-root-1\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments"
}
]
}
},
"sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "33s",
"circuitBreakers": {
},
"outlierDetection": {
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
}
]
}
},
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "5s",
"circuitBreakers": {
},
"outlierDetection": {
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target"
},
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target"
}
]
}
},
"sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "local_app",
"type": "STATIC",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
}
],
"typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"nonce": "00000001"
}

View File

@ -58,7 +58,7 @@
"dnsRefreshRate": "10s",
"dnsLookupFamily": "V4_ONLY",
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
@ -115,7 +115,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {

View File

@ -0,0 +1,147 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "5s",
"circuitBreakers": {
},
"outlierDetection": {
"consecutive5xx": 5,
"interval": "0.000000010s",
"enforcingConsecutive5xx": 80
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
}
]
}
},
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "5s",
"circuitBreakers": {
},
"outlierDetection": {
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target"
},
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target"
}
]
}
},
"sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "local_app",
"type": "STATIC",
"connectTimeout": "5s",
"loadAssignment": {
"clusterName": "local_app",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
}
],
"typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"nonce": "00000001"
}

View File

@ -0,0 +1,139 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"clusterType": {
"name": "envoy.clusters.aggregate",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig",
"clusters": [
"failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"failover-target~db.default.cluster-01.external.peer1.domain"
]
}
},
"connectTimeout": "33s",
"lbPolicy": "CLUSTER_PROVIDED"
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.cluster-01.external.peer1.domain",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "33s",
"circuitBreakers": {
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "peer1-root-1\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments"
}
]
}
},
"sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
}
}
},
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"type": "EDS",
"edsClusterConfig": {
"edsConfig": {
"ads": {
},
"resourceApiVersion": "V3"
}
},
"connectTimeout": "33s",
"circuitBreakers": {
},
"outlierDetection": {
},
"commonLbConfig": {
"healthyPanicThreshold": {
}
},
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
},
"matchSubjectAltNames": [
{
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
}
]
}
},
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
}
],
"typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"nonce": "00000001"
}

View File

@ -18,7 +18,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
@ -75,7 +75,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {
@ -157,7 +157,7 @@
},
"outlierDetection": {
"maxEjectionPercent": 100
},
"commonLbConfig": {
"healthyPanicThreshold": {

View File

@ -0,0 +1,109 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.cluster-01.external.peer1.domain",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.20.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"nonce": "00000001"
}

View File

@ -0,0 +1,75 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.cluster-01.external.peer1.domain",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.40.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
},
{
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.1",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
},
{
"endpoint": {
"address": {
"socketAddress": {
"address": "10.10.1.2",
"portValue": 8080
}
}
},
"healthStatus": "HEALTHY",
"loadBalancingWeight": 1
}
]
}
]
}
],
"typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
"nonce": "00000001"
}

View File

@ -0,0 +1,180 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.filters.network.tcp_proxy",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
"statPrefix": "upstream.db.default.default.dc1",
"cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
]
}
],
"trafficDirection": "OUTBOUND"
},
{
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.filters.network.tcp_proxy",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
"statPrefix": "upstream.prepared_query_geo-cache",
"cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
}
}
]
}
],
"trafficDirection": "OUTBOUND"
},
{
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"statPrefix": "public_listener",
"routeConfig": {
"name": "public_listener",
"virtualHosts": [
{
"name": "public_listener",
"domains": [
"*"
],
"routes": [
{
"match": {
"prefix": "/"
},
"route": {
"cluster": "local_app"
}
}
]
}
]
},
"httpFilters": [
{
"name": "envoy.filters.http.rbac",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC",
"rules": {
}
}
},
{
"name": "envoy.filters.http.router",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
],
"tracing": {
"customTags": [
{
"tag": "custom_header",
"requestHeader": {
"name": "x-custom-traceid"
}
},
{
"tag": "alloc_id",
"environment": {
"name": "NOMAD_ALLOC_ID"
}
}
],
"provider": {
"name": "envoy.tracers.zipkin",
"typedConfig": {
"@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
"collectorCluster": "otelcolector",
"collectorEndpoint": "/api/v2/spans",
"sharedSpanContext": false,
"collectorEndpointVersion": "HTTP_JSON"
}
}
},
"forwardClientCertDetails": "APPEND_FORWARD",
"setCurrentClientCertDetails": {
"subject": true,
"cert": true,
"chain": true,
"dns": true,
"uri": true
}
}
}
],
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
"commonTlsContext": {
"tlsParams": {
},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
}
}
}
],
"trafficDirection": "INBOUND"
}
],
"typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener",
"nonce": "00000001"
}

View File

@ -363,7 +363,7 @@ func TestAPI_AgentServicesWithFilterOpts(t *testing.T) {
}
require.NoError(t, agent.ServiceRegister(reg))
opts := &QueryOptions{Namespace: splitDefaultNamespace}
opts := &QueryOptions{Namespace: defaultNamespace}
services, err := agent.ServicesWithFilterOpts("foo in Tags", opts)
require.NoError(t, err)
require.Len(t, services, 1)
@ -791,8 +791,8 @@ func TestAPI_AgentService(t *testing.T) {
Warning: 1,
},
Meta: map[string]string{},
Namespace: splitDefaultNamespace,
Partition: splitDefaultPartition,
Namespace: defaultNamespace,
Partition: defaultPartition,
Datacenter: "dc1",
}
require.Equal(t, expect, got)
@ -932,7 +932,7 @@ func TestAPI_AgentUpdateTTLOpts(t *testing.T) {
}
}
opts := &QueryOptions{Namespace: splitDefaultNamespace}
opts := &QueryOptions{Namespace: defaultNamespace}
if err := agent.UpdateTTLOpts("service:foo", "foo", HealthWarning, opts); err != nil {
t.Fatalf("err: %v", err)
@ -1007,7 +1007,7 @@ func TestAPI_AgentChecksWithFilterOpts(t *testing.T) {
reg.TTL = "15s"
require.NoError(t, agent.CheckRegister(reg))
opts := &QueryOptions{Namespace: splitDefaultNamespace}
opts := &QueryOptions{Namespace: defaultNamespace}
checks, err := agent.ChecksWithFilterOpts("Name == foo", opts)
require.NoError(t, err)
require.Len(t, checks, 1)
@ -1382,7 +1382,7 @@ func TestAPI_ServiceMaintenanceOpts(t *testing.T) {
}
// Specify namespace in query option
opts := &QueryOptions{Namespace: splitDefaultNamespace}
opts := &QueryOptions{Namespace: defaultNamespace}
// Enable maintenance mode
if err := agent.EnableServiceMaintenanceOpts("redis", "broken", opts); err != nil {
@ -1701,7 +1701,7 @@ func TestAPI_AgentHealthServiceOpts(t *testing.T) {
requireServiceHealthID := func(t *testing.T, serviceID, expected string, shouldExist bool) {
msg := fmt.Sprintf("service id:%s, shouldExist:%v, expectedStatus:%s : bad %%s", serviceID, shouldExist, expected)
opts := &QueryOptions{Namespace: splitDefaultNamespace}
opts := &QueryOptions{Namespace: defaultNamespace}
state, out, err := agent.AgentHealthServiceByIDOpts(serviceID, opts)
require.Nil(t, err, msg, "err")
require.Equal(t, expected, state, msg, "state")
@ -1715,7 +1715,7 @@ func TestAPI_AgentHealthServiceOpts(t *testing.T) {
requireServiceHealthName := func(t *testing.T, serviceName, expected string, shouldExist bool) {
msg := fmt.Sprintf("service name:%s, shouldExist:%v, expectedStatus:%s : bad %%s", serviceName, shouldExist, expected)
opts := &QueryOptions{Namespace: splitDefaultNamespace}
opts := &QueryOptions{Namespace: defaultNamespace}
state, outs, err := agent.AgentHealthServiceByNameOpts(serviceName, opts)
require.Nil(t, err, msg, "err")
require.Equal(t, expected, state, msg, "state")

View File

@ -20,6 +20,7 @@ type Node struct {
CreateIndex uint64
ModifyIndex uint64
Partition string `json:",omitempty"`
PeerName string `json:",omitempty"`
}
type ServiceAddress struct {

View File

@ -51,7 +51,7 @@ func TestAPI_CatalogNodes(t *testing.T) {
want := &Node{
ID: s.Config.NodeID,
Node: s.Config.NodeName,
Partition: splitDefaultPartition,
Partition: defaultPartition,
Address: "127.0.0.1",
Datacenter: "dc1",
TaggedAddresses: map[string]string{
@ -1144,8 +1144,8 @@ func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) {
expect := []*GatewayService{
{
Service: CompoundServiceName{Name: "api", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Gateway: CompoundServiceName{Name: "terminating", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Service: CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition},
Gateway: CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition},
GatewayKind: ServiceKindTerminatingGateway,
CAFile: "api/ca.crt",
CertFile: "api/client.crt",
@ -1153,8 +1153,8 @@ func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) {
SNI: "my-domain",
},
{
Service: CompoundServiceName{Name: "redis", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Gateway: CompoundServiceName{Name: "terminating", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Service: CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition},
Gateway: CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition},
GatewayKind: ServiceKindTerminatingGateway,
CAFile: "ca.crt",
CertFile: "client.crt",
@ -1212,15 +1212,15 @@ func TestAPI_CatalogGatewayServices_Ingress(t *testing.T) {
expect := []*GatewayService{
{
Service: CompoundServiceName{Name: "api", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Gateway: CompoundServiceName{Name: "ingress", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Service: CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition},
Gateway: CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition},
GatewayKind: ServiceKindIngressGateway,
Protocol: "tcp",
Port: 8888,
},
{
Service: CompoundServiceName{Name: "redis", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Gateway: CompoundServiceName{Name: "ingress", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
Service: CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition},
Gateway: CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition},
GatewayKind: ServiceKindIngressGateway,
Protocol: "tcp",
Port: 9999,

View File

@ -196,6 +196,11 @@ type PassiveHealthCheck struct {
// MaxFailures is the count of consecutive failures that results in a host
// being removed from the pool.
MaxFailures uint32 `alias:"max_failures"`
// EnforcingConsecutive5xx is the % chance that a host will be actually ejected
// when an outlier status is detected through consecutive 5xx.
// This setting can be used to disable ejection or to ramp it up slowly.
EnforcingConsecutive5xx uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"`
}
// UpstreamLimits describes the limits that are associated with a specific

View File

@ -139,8 +139,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test-failover",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
DefaultSubset: "v1",
Subsets: map[string]ServiceResolverSubset{
"v1": {
@ -159,7 +159,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
},
"v1": {
Service: "alternate",
Namespace: splitDefaultNamespace,
Namespace: defaultNamespace,
},
"v3": {
Targets: []ServiceResolverFailoverTarget{
@ -182,12 +182,12 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test-redirect",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
Redirect: &ServiceResolverRedirect{
Service: "test-failover",
ServiceSubset: "v2",
Namespace: splitDefaultNamespace,
Namespace: defaultNamespace,
Datacenter: "d",
},
},
@ -198,8 +198,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test-redirect",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
Redirect: &ServiceResolverRedirect{
Service: "test-failover",
Peer: "cluster-01",
@ -212,14 +212,14 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
entry: &ServiceSplitterConfigEntry{
Kind: ServiceSplitter,
Name: "test-split",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
Splits: []ServiceSplit{
{
Weight: 90,
Service: "test-failover",
ServiceSubset: "v1",
Namespace: splitDefaultNamespace,
Namespace: defaultNamespace,
RequestHeaders: &HTTPHeaderModifiers{
Set: map[string]string{
"x-foo": "bar",
@ -232,7 +232,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
{
Weight: 10,
Service: "test-redirect",
Namespace: splitDefaultNamespace,
Namespace: defaultNamespace,
},
},
Meta: map[string]string{
@ -247,8 +247,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
entry: &ServiceRouterConfigEntry{
Kind: ServiceRouter,
Name: "test-route",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
Routes: []ServiceRoute{
{
Match: &ServiceRouteMatch{
@ -265,8 +265,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
Destination: &ServiceRouteDestination{
Service: "test-failover",
ServiceSubset: "v2",
Namespace: splitDefaultNamespace,
Partition: splitDefaultPartition,
Namespace: defaultNamespace,
Partition: defaultPartition,
PrefixRewrite: "/",
RequestTimeout: 5 * time.Second,
NumRetries: 5,
@ -358,8 +358,8 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) {
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test-least-req",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
LoadBalancer: &LoadBalancer{
Policy: "least_request",
LeastRequestConfig: &LeastRequestConfig{ChoiceCount: 10},
@ -372,8 +372,8 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) {
entry: &ServiceResolverConfigEntry{
Kind: ServiceResolver,
Name: "test-ring-hash",
Namespace: splitDefaultNamespace,
Partition: splitDefaultPartition,
Namespace: defaultNamespace,
Partition: defaultPartition,
LoadBalancer: &LoadBalancer{
Policy: "ring_hash",
RingHashConfig: &RingHashConfig{

View File

@ -57,7 +57,7 @@ type ServiceConsumer struct {
func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices }
func (e *ExportedServicesConfigEntry) GetName() string { return e.Name }
func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name }
func (e *ExportedServicesConfigEntry) GetNamespace() string { return splitDefaultNamespace }
func (e *ExportedServicesConfigEntry) GetNamespace() string { return "" }
func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta }
func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }

View File

@ -17,7 +17,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) {
testutil.RunStep(t, "set and get", func(t *testing.T) {
exports := &ExportedServicesConfigEntry{
Name: PartitionDefaultName,
Partition: splitDefaultPartition,
Partition: defaultPartition,
Meta: map[string]string{
"gir": "zim",
},
@ -48,7 +48,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) {
Services: []ExportedService{
{
Name: "db",
Namespace: splitDefaultNamespace,
Namespace: defaultNamespace,
Consumers: []ServiceConsumer{
{
PeerName: "alpha",
@ -60,7 +60,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) {
"foo": "bar",
"gir": "zim",
},
Partition: splitDefaultPartition,
Partition: defaultPartition,
}
_, wm, err := entries.Set(updated, nil)

View File

@ -215,8 +215,8 @@ func TestAPI_ConfigEntries(t *testing.T) {
"foo": "bar",
"gir": "zim",
},
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
}
ce := c.ConfigEntries()

View File

@ -87,7 +87,7 @@ func TestAPI_CoordinateUpdate(t *testing.T) {
newCoord.Height = 0.5
entry := &CoordinateEntry{
Node: node,
Partition: splitDefaultPartition,
Partition: defaultPartition,
Coord: newCoord,
}
_, err = coord.Update(entry, nil)

View File

@ -45,6 +45,8 @@ type HealthCheck struct {
Type string
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
ExposedPort int
PeerName string `json:",omitempty"`
Definition HealthCheckDefinition
@ -176,8 +178,7 @@ type HealthChecks []*HealthCheck
// attached, this function determines the best representative of the status as
// a single string using the following heuristic:
//
// maintenance > critical > warning > passing
//
// maintenance > critical > warning > passing
func (c HealthChecks) AggregatedStatus() string {
var passing, warning, critical, maintenance bool
for _, check := range c {

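For context, a minimal sketch of the heuristic that comment describes, using the api package's exported status constants. The check IDs below are illustrative, and note that in the full implementation (not shown in this hunk) maintenance is detected via the reserved maintenance check IDs rather than the Status field:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	checks := api.HealthChecks{
		&api.HealthCheck{CheckID: "web-http", Status: api.HealthPassing},
		&api.HealthCheck{CheckID: "web-mem", Status: api.HealthWarning},
		// Maintenance is recognized by the reserved check ID, so this
		// entry outranks the others regardless of its Status value.
		&api.HealthCheck{CheckID: api.NodeMaint, Status: api.HealthCritical},
	}
	fmt.Println(checks.AggregatedStatus()) // maintenance
}
```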
View File

@ -223,8 +223,8 @@ func TestAPI_HealthChecks(t *testing.T) {
ServiceName: "foo",
ServiceTags: []string{"bar"},
Type: "ttl",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
},
}

View File

@ -6,5 +6,5 @@ package api
// The following defaults return "default" in enterprise and "" in OSS.
// This constant is useful when a default value is needed for an
// operation that will reject non-empty values in OSS.
const splitDefaultNamespace = ""
const splitDefaultPartition = ""
const defaultNamespace = ""
const defaultPartition = ""

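A sketch of how an edition-split constant like this is typically wired up; the consulent build tag and file name here are assumptions, not part of this diff:

```go
//go:build !consulent

package api

// OSS half of the split: "" is accepted everywhere, while a non-empty
// value such as "default" would be rejected by OSS servers. A
// build-tagged enterprise counterpart defines the same constant as
// "default", so shared test code compiles in both editions.
const defaultNamespace = ""
```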
View File

@ -17,6 +17,9 @@ type QueryFailoverOptions struct {
Targets []QueryFailoverTarget
}
// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions
type QueryFailoverTarget struct {
// PeerName specifies a peer to try during failover.
PeerName string

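This pairs with the changelog entry about the QueryDatacenterOptions rename; a hedged sketch of why an alias (rather than a new defined type) preserves source compatibility, with illustrative field values:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	// A type alias declares another name for the same type, so code
	// written against the old name keeps compiling and values are
	// interchangeable without conversion.
	old := api.QueryDatacenterOptions{NearestN: 2, Datacenters: []string{"dc2"}}
	var opts api.QueryFailoverOptions = old // compiles: identical types
	_ = opts
}
```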
View File

@ -67,6 +67,7 @@ const (
KVLock KVOp = "lock"
KVUnlock KVOp = "unlock"
KVGet KVOp = "get"
KVGetOrEmpty KVOp = "get-or-empty"
KVGetTree KVOp = "get-tree"
KVCheckSession KVOp = "check-session"
KVCheckIndex KVOp = "check-index"

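Judging by the name and the API docs referenced in the changelog, get-or-empty behaves like get except that a missing key yields an empty result instead of failing the transaction. A minimal sketch of driving it through the API client (the key name is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ops := api.TxnOps{
		&api.TxnOp{KV: &api.KVTxnOp{
			Verb: api.KVGetOrEmpty,
			Key:  "config/maybe-missing", // illustrative key
		}},
	}

	// Unlike KVGet, an absent key does not abort the transaction; the
	// corresponding result is simply returned empty.
	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, len(resp.Results))
}
```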
View File

@ -187,7 +187,7 @@ func TestAPI_ClientTxn(t *testing.T) {
CreateIndex: ret.Results[0].KV.CreateIndex,
ModifyIndex: ret.Results[0].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Partition: splitDefaultPartition,
Partition: defaultPartition,
},
},
&TxnResult{
@ -199,14 +199,14 @@ func TestAPI_ClientTxn(t *testing.T) {
CreateIndex: ret.Results[1].KV.CreateIndex,
ModifyIndex: ret.Results[1].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Partition: splitDefaultPartition,
Partition: defaultPartition,
},
},
&TxnResult{
Node: &Node{
ID: nodeID,
Node: "foo",
Partition: splitDefaultPartition,
Partition: defaultPartition,
Address: "2.2.2.2",
Datacenter: "dc1",
CreateIndex: ret.Results[2].Node.CreateIndex,
@ -218,8 +218,8 @@ func TestAPI_ClientTxn(t *testing.T) {
ID: "foo1",
CreateIndex: ret.Results[3].Service.CreateIndex,
ModifyIndex: ret.Results[3].Service.CreateIndex,
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
},
},
&TxnResult{
@ -237,8 +237,8 @@ func TestAPI_ClientTxn(t *testing.T) {
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "tcp",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
@ -258,8 +258,8 @@ func TestAPI_ClientTxn(t *testing.T) {
DeregisterCriticalServiceAfterDuration: 160 * time.Second,
},
Type: "tcp",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
@ -279,8 +279,8 @@ func TestAPI_ClientTxn(t *testing.T) {
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "udp",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
@ -300,8 +300,8 @@ func TestAPI_ClientTxn(t *testing.T) {
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "udp",
Partition: splitDefaultPartition,
Namespace: splitDefaultNamespace,
Partition: defaultPartition,
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
@ -342,14 +342,14 @@ func TestAPI_ClientTxn(t *testing.T) {
CreateIndex: ret.Results[0].KV.CreateIndex,
ModifyIndex: ret.Results[0].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Partition: splitDefaultPartition,
Partition: defaultPartition,
},
},
&TxnResult{
Node: &Node{
ID: s.Config.NodeID,
Node: s.Config.NodeName,
Partition: splitDefaultPartition,
Partition: defaultPartition,
Address: "127.0.0.1",
Datacenter: "dc1",
TaggedAddresses: map[string]string{

View File

@ -0,0 +1,5 @@
ARG CONSUL_IMAGE_VERSION=latest
FROM consul:${CONSUL_IMAGE_VERSION}
RUN apk update && apk add iptables
ARG TARGETARCH
COPY linux_${TARGETARCH}/consul /bin/consul

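A note on the new dockerfile above: TARGETARCH is populated automatically by BuildKit for each platform being built, so with the binaries pre-staged under linux_amd64/ and linux_arm64/ a multi-arch build might be invoked roughly as `docker buildx build --platform linux/amd64,linux/arm64 --build-arg CONSUL_IMAGE_VERSION=1.13.1 .` (the platforms and version here are assumptions, not taken from this diff).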
View File

@ -98,6 +98,10 @@ func (f *HTTPFlags) Datacenter() string {
return f.datacenter.String()
}
func (f *HTTPFlags) Partition() string {
return f.partition.String()
}
func (f *HTTPFlags) Stale() bool {
if f.stale.v == nil {
return false

View File

@ -0,0 +1,91 @@
package delete
import (
"context"
"flag"
"fmt"
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
)
func New(ui cli.Ui) *cmd {
c := &cmd{UI: ui}
c.init()
return c
}
type cmd struct {
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
name string
}
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.PartitionFlag())
c.help = flags.Usage(help, c.flags)
}
func (c *cmd) Run(args []string) int {
if err := c.flags.Parse(args); err != nil {
return 1
}
if c.name == "" {
c.UI.Error("Missing the required -name flag")
return 1
}
client, err := c.http.APIClient()
if err != nil {
c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
return 1
}
peerings := client.Peerings()
_, err = peerings.Delete(context.Background(), c.name, &api.WriteOptions{})
if err != nil {
c.UI.Error(fmt.Sprintf("Error deleting peering for %s: %v", c.name, err))
return 1
}
c.UI.Info(fmt.Sprintf("Successfully submitted peering connection, %s, for deletion", c.name))
return 0
}
func (c *cmd) Synopsis() string {
return synopsis
}
func (c *cmd) Help() string {
return flags.Usage(c.help, nil)
}
const (
synopsis = "Delete a peering connection"
help = `
Usage: consul peering delete [options] -name <peer name>
Delete a peering connection. Consul deletes all data imported from the peer
in the background. The peering connection is removed after all associated
data has been deleted. Operators can still read the peering connections
while the data is being removed. A 'DeletedAt' field will be populated with
the timestamp of when the peering was marked for deletion.
Example:
$ consul peering delete -name west-dc
`
)

View File

@ -0,0 +1,70 @@
package delete
import (
"context"
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
)
func TestDeleteCommand_noTabs(t *testing.T) {
t.Parallel()
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
t.Fatal("help has tabs")
}
}
func TestDeleteCommand(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
acceptor := agent.NewTestAgent(t, ``)
t.Cleanup(func() { _ = acceptor.Shutdown() })
testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")
acceptingClient := acceptor.Client()
t.Run("name is required", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
}
code := cmd.Run(args)
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
})
t.Run("delete connection", func(t *testing.T) {
req := api.PeeringGenerateTokenRequest{PeerName: "foo"}
_, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
require.NoError(t, err, "Could not generate peering token at acceptor")
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-name=foo",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "Success")
})
}

View File

@ -0,0 +1,109 @@
package establish
import (
"context"
"flag"
"fmt"
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
)
func New(ui cli.Ui) *cmd {
c := &cmd{UI: ui}
c.init()
return c
}
type cmd struct {
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
name string
peeringToken string
meta map[string]string
}
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")
c.flags.StringVar(&c.peeringToken, "peering-token", "", "(Required) The peering token from the accepting cluster.")
c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta",
"Metadata to associate with the peering, formatted as key=value. This flag "+
"may be specified multiple times to set multiple meta fields.")
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.PartitionFlag())
c.help = flags.Usage(help, c.flags)
}
func (c *cmd) Run(args []string) int {
if err := c.flags.Parse(args); err != nil {
return 1
}
if c.name == "" {
c.UI.Error("Missing the required -name flag")
return 1
}
if c.peeringToken == "" {
c.UI.Error("Missing the required -peering-token flag")
return 1
}
client, err := c.http.APIClient()
if err != nil {
c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
return 1
}
peerings := client.Peerings()
req := api.PeeringEstablishRequest{
PeerName: c.name,
PeeringToken: c.peeringToken,
Partition: c.http.Partition(),
Meta: c.meta,
}
_, _, err = peerings.Establish(context.Background(), req, &api.WriteOptions{})
if err != nil {
c.UI.Error(fmt.Sprintf("Error establishing peering for %s: %v", req.PeerName, err))
return 1
}
c.UI.Info(fmt.Sprintf("Successfully established peering connection with %s", req.PeerName))
return 0
}
func (c *cmd) Synopsis() string {
return synopsis
}
func (c *cmd) Help() string {
return flags.Usage(c.help, nil)
}
const (
synopsis = "Establish a peering connection"
help = `
Usage: consul peering establish [options] -name <peer name> -peering-token <token>
Establish a peering connection. The name provided will be used locally by
this cluster to refer to the peering connection. The peering token can
only be used once to establish the connection.
Example:
$ consul peering establish -name west-dc -peering-token <token>
`
)

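Taken together with generate-token below, the two commands wrap a two-step API handshake. A sketch of the same flow driven directly through the Go client; the addresses and peer names are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// One client per cluster; both addresses are assumptions.
	acceptor, err := api.NewClient(&api.Config{Address: "acceptor.example:8500"})
	if err != nil {
		log.Fatal(err)
	}
	dialer, err := api.NewClient(&api.Config{Address: "dialer.example:8500"})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Step 1: the accepting cluster mints a single-use token under the
	// name it will use for the peer.
	genRes, _, err := acceptor.Peerings().GenerateToken(ctx,
		api.PeeringGenerateTokenRequest{PeerName: "west-dc"}, &api.WriteOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: the dialing cluster redeems the token under its own local
	// name for the peer; the token cannot be reused afterwards.
	_, _, err = dialer.Peerings().Establish(ctx,
		api.PeeringEstablishRequest{PeerName: "east-dc", PeeringToken: genRes.PeeringToken},
		&api.WriteOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("peering established")
}
```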
View File

@ -0,0 +1,127 @@
package establish
import (
"context"
"fmt"
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
)
func TestEstablishCommand_noTabs(t *testing.T) {
t.Parallel()
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
t.Fatal("help has tabs")
}
}
func TestEstablishCommand(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
acceptor := agent.NewTestAgent(t, ``)
t.Cleanup(func() { _ = acceptor.Shutdown() })
dialer := agent.NewTestAgent(t, ``)
t.Cleanup(func() { _ = dialer.Shutdown() })
testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")
testrpc.WaitForTestAgent(t, dialer.RPC, "dc1")
acceptingClient := acceptor.Client()
dialingClient := dialer.Client()
t.Run("name is required", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + dialer.HTTPAddr(),
"-peering-token=1234abcde",
}
code := cmd.Run(args)
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
})
t.Run("peering token is required", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + dialer.HTTPAddr(),
"-name=bar",
}
code := cmd.Run(args)
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -peering-token flag")
})
t.Run("establish connection", func(t *testing.T) {
// Grab the token from the acceptor
req := api.PeeringGenerateTokenRequest{PeerName: "foo"}
res, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
require.NoError(t, err, "Could not generate peering token at acceptor")
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + dialer.HTTPAddr(),
"-name=bar",
fmt.Sprintf("-peering-token=%s", res.PeeringToken),
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "Success")
})
t.Run("establish connection with options", func(t *testing.T) {
// Grab the token from the acceptor
req := api.PeeringGenerateTokenRequest{PeerName: "foo"}
res, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
require.NoError(t, err, "Could not generate peering token at acceptor")
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + dialer.HTTPAddr(),
"-name=bar",
fmt.Sprintf("-peering-token=%s", res.PeeringToken),
"-meta=env=production",
"-meta=region=us-west-1",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "Success")
// Meta
peering, _, err := dialingClient.Peerings().Read(context.Background(), "bar", &api.QueryOptions{})
require.NoError(t, err)
actual, ok := peering.Meta["env"]
require.True(t, ok)
require.Equal(t, "production", actual)
actual, ok = peering.Meta["region"]
require.True(t, ok)
require.Equal(t, "us-west-1", actual)
})
}

View File

@ -0,0 +1,139 @@
package generate
import (
"context"
"encoding/json"
"flag"
"fmt"
"strings"
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
"github.com/hashicorp/consul/command/peering"
)
func New(ui cli.Ui) *cmd {
c := &cmd{UI: ui}
c.init()
return c
}
type cmd struct {
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
name string
externalAddresses []string
meta map[string]string
format string
}
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")
c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta",
"Metadata to associate with the peering, formatted as key=value. This flag "+
"may be specified multiple times to set multiple metadata fields.")
c.flags.Var((*flags.AppendSliceValue)(&c.externalAddresses), "server-external-addresses",
"A list of addresses to put into the generated token, formatted as a comma-separate list. "+
"Addresses are the form of <host or IP>:port. "+
"This could be used to specify load balancer(s) or external IPs to reach the servers from "+
"the dialing side, and will override any server addresses obtained from the \"consul\" service.")
c.flags.StringVar(
&c.format,
"format",
peering.PeeringFormatPretty,
fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty),
)
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.PartitionFlag())
c.help = flags.Usage(help, c.flags)
}
func (c *cmd) Run(args []string) int {
if err := c.flags.Parse(args); err != nil {
return 1
}
if c.name == "" {
c.UI.Error("Missing the required -name flag")
return 1
}
if !peering.FormatIsValid(c.format) {
c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|")))
return 1
}
client, err := c.http.APIClient()
if err != nil {
c.UI.Error(fmt.Sprintf("Error connect to Consul agent: %s", err))
return 1
}
peerings := client.Peerings()
req := api.PeeringGenerateTokenRequest{
PeerName: c.name,
Partition: c.http.Partition(),
Meta: c.meta,
ServerExternalAddresses: c.externalAddresses,
}
res, _, err := peerings.GenerateToken(context.Background(), req, &api.WriteOptions{})
if err != nil {
c.UI.Error(fmt.Sprintf("Error generating peering token for %s: %v", req.PeerName, err))
return 1
}
if c.format == peering.PeeringFormatJSON {
output, err := json.Marshal(res)
if err != nil {
c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err))
return 1
}
c.UI.Output(string(output))
return 0
}
c.UI.Info(res.PeeringToken)
return 0
}
func (c *cmd) Synopsis() string {
return synopsis
}
func (c *cmd) Help() string {
return flags.Usage(c.help, nil)
}
const (
synopsis = "Generate a peering token"
help = `
Usage: consul peering generate-token [options] -name <peer name>
Generate a peering token. The name provided will be used locally by
this cluster to refer to the peering connection. Re-generating a token
for a given name will not interrupt any active connection, but will
invalidate any unused token for that name.
Example:
$ consul peering generate-token -name west-dc
Example using a load balancer in front of Consul servers:
$ consul peering generate-token -name west-dc -server-external-addresses load-balancer.elb.us-west-1.amazonaws.com:8502
`
)

View File

@ -0,0 +1,141 @@
package generate
import (
"context"
"encoding/base64"
"encoding/json"
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
)
func TestGenerateCommand_noTabs(t *testing.T) {
t.Parallel()
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
t.Fatal("help has tabs")
}
}
func TestGenerateCommand(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := agent.NewTestAgent(t, ``)
t.Cleanup(func() { _ = a.Shutdown() })
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
client := a.Client()
t.Run("name is required", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + a.HTTPAddr(),
}
code := cmd.Run(args)
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
})
t.Run("invalid format", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-name=foo",
"-format=toml",
}
code := cmd.Run(args)
require.Equal(t, 1, code, "exited successfully when it should have failed")
output := ui.ErrorWriter.String()
require.Contains(t, output, "Invalid format")
})
t.Run("generate token", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-name=foo",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String())
require.NoError(t, err, "error decoding token")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
})
t.Run("generate token with options", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-name=bar",
"-server-external-addresses=1.2.3.4,5.6.7.8",
"-meta=env=production",
"-meta=region=us-east-1",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String())
require.NoError(t, err, "error decoding token")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
// ServerExternalAddresses
require.Contains(t, string(token), "1.2.3.4")
require.Contains(t, string(token), "5.6.7.8")
// Meta
peering, _, err := client.Peerings().Read(context.Background(), "bar", &api.QueryOptions{})
require.NoError(t, err)
actual, ok := peering.Meta["env"]
require.True(t, ok)
require.Equal(t, "production", actual)
actual, ok = peering.Meta["region"]
require.True(t, ok)
require.Equal(t, "us-east-1", actual)
})
t.Run("read with json", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-name=baz",
"-format=json",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.Bytes()
var outputRes api.PeeringGenerateTokenResponse
require.NoError(t, json.Unmarshal(output, &outputRes))
token, err := base64.StdEncoding.DecodeString(outputRes.PeeringToken)
require.NoError(t, err, "error decoding token")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
})
}

View File

@ -0,0 +1,139 @@
package list
import (
"context"
"encoding/json"
"flag"
"fmt"
"sort"
"strings"
"github.com/mitchellh/cli"
"github.com/ryanuber/columnize"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
"github.com/hashicorp/consul/command/peering"
)
func New(ui cli.Ui) *cmd {
c := &cmd{UI: ui}
c.init()
return c
}
type cmd struct {
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
format string
}
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(
&c.format,
"format",
peering.PeeringFormatPretty,
fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty),
)
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.PartitionFlag())
c.help = flags.Usage(help, c.flags)
}
func (c *cmd) Run(args []string) int {
if err := c.flags.Parse(args); err != nil {
return 1
}
if !peering.FormatIsValid(c.format) {
c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|")))
return 1
}
client, err := c.http.APIClient()
if err != nil {
c.UI.Error(fmt.Sprintf("Error connect to Consul agent: %s", err))
return 1
}
peerings := client.Peerings()
res, _, err := peerings.List(context.Background(), &api.QueryOptions{})
if err != nil {
c.UI.Error("Error listing peerings")
return 1
}
list := peeringList(res)
sort.Sort(list)
if c.format == peering.PeeringFormatJSON {
output, err := json.Marshal(list)
if err != nil {
c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err))
return 1
}
c.UI.Output(string(output))
return 0
}
if len(res) == 0 {
c.UI.Info(fmt.Sprintf("There are no peering connections."))
return 0
}
result := make([]string, 0, len(list))
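// 0x1f is the ASCII unit separator; using it as the column delimiter
// lets fields such as the comma-joined meta pairs contain spaces
// without confusing columnize's column splitting.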
header := "Name\x1fState\x1fImported Svcs\x1fExported Svcs\x1fMeta"
result = append(result, header)
for _, peer := range list {
metaPairs := make([]string, 0, len(peer.Meta))
for k, v := range peer.Meta {
metaPairs = append(metaPairs, fmt.Sprintf("%s=%s", k, v))
}
meta := strings.Join(metaPairs, ",")
line := fmt.Sprintf("%s\x1f%s\x1f%d\x1f%d\x1f%s",
peer.Name, peer.State, peer.ImportedServiceCount, peer.ExportedServiceCount, meta)
result = append(result, line)
}
output := columnize.Format(result, &columnize.Config{Delim: string([]byte{0x1f})})
c.UI.Output(output)
return 0
}
func (c *cmd) Synopsis() string {
return synopsis
}
func (c *cmd) Help() string {
return flags.Usage(c.help, nil)
}
const (
synopsis = "List peering connections"
help = `
Usage: consul peering list [options]
List all peering connections. The results will be filtered according
to ACL policy configuration.
Example:
$ consul peering list
`
)
// peeringList applies sort.Interface to a list of peering connections for sorting by name.
type peeringList []*api.Peering
func (d peeringList) Len() int { return len(d) }
func (d peeringList) Less(i, j int) bool { return d[i].Name < d[j].Name }
func (d peeringList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }

View File

@ -0,0 +1,133 @@
package list
import (
"context"
"encoding/json"
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
)
func TestListCommand_noTabs(t *testing.T) {
t.Parallel()
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
t.Fatal("help has tabs")
}
}
func TestListCommand(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
acceptor := agent.NewTestAgent(t, ``)
t.Cleanup(func() { _ = acceptor.Shutdown() })
testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")
acceptingClient := acceptor.Client()
t.Run("invalid format", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-format=toml",
}
code := cmd.Run(args)
require.Equal(t, 1, code, "exited successfully when it should have failed")
output := ui.ErrorWriter.String()
require.Contains(t, output, "Invalid format")
})
t.Run("no results - pretty", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "no peering connections")
})
t.Run("two results for pretty print", func(t *testing.T) {
generateReq := api.PeeringGenerateTokenRequest{PeerName: "foo"}
_, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
require.NoError(t, err, "Could not generate peering token at acceptor for \"foo\"")
generateReq = api.PeeringGenerateTokenRequest{PeerName: "bar"}
_, _, err = acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
require.NoError(t, err, "Could not generate peering token at acceptor for \"bar\"")
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Equal(t, 3, strings.Count(output, "\n")) // There should be three lines including the header
lines := strings.Split(output, "\n")
require.Contains(t, lines[0], "Name")
require.Contains(t, lines[1], "bar")
require.Contains(t, lines[2], "foo")
})
t.Run("no results - json", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-format=json",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "[]")
})
t.Run("two results for JSON print", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-format=json",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.Bytes()
var outputList []*api.Peering
require.NoError(t, json.Unmarshal(output, &outputList))
require.Len(t, outputList, 2)
require.Equal(t, "bar", outputList[0].Name)
require.Equal(t, "foo", outputList[1].Name)
})
}

View File

@ -0,0 +1,69 @@
package peering
import (
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/command/flags"
)
const (
PeeringFormatJSON = "json"
PeeringFormatPretty = "pretty"
)
func GetSupportedFormats() []string {
return []string{PeeringFormatJSON, PeeringFormatPretty}
}
func FormatIsValid(f string) bool {
return f == PeeringFormatPretty || f == PeeringFormatJSON
}
func New() *cmd {
return &cmd{}
}
type cmd struct{}
func (c *cmd) Run(args []string) int {
return cli.RunResultHelp
}
func (c *cmd) Synopsis() string {
return synopsis
}
func (c *cmd) Help() string {
return flags.Usage(help, nil)
}
const synopsis = "Create and manage peering connections between Consul clusters"
const help = `
Usage: consul peering <subcommand> [options] [args]
This command has subcommands for interacting with Cluster Peering
connections. Here are some simple examples, and more detailed
examples are available in the subcommands or the documentation.
Generate a peering token:
$ consul peering generate-token -name west-dc
Establish a peering connection:
$ consul peering establish -name east-dc -peering-token <token>
List all the local peering connections:
$ consul peering list
Print the status of a peering connection:
$ consul peering read -name west-dc
Delete and close a peering connection:
$ consul peering delete -name west-dc
For more examples, ask for subcommand help or view the documentation.
`

View File

@ -0,0 +1,164 @@
package read
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"strings"
"time"
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
"github.com/hashicorp/consul/command/peering"
)
func New(ui cli.Ui) *cmd {
c := &cmd{UI: ui}
c.init()
return c
}
type cmd struct {
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
name string
format string
}
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")
c.flags.StringVar(
&c.format,
"format",
peering.PeeringFormatPretty,
fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty),
)
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.PartitionFlag())
c.help = flags.Usage(help, c.flags)
}
func (c *cmd) Run(args []string) int {
if err := c.flags.Parse(args); err != nil {
return 1
}
if c.name == "" {
c.UI.Error("Missing the required -name flag")
return 1
}
if !peering.FormatIsValid(c.format) {
c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|")))
return 1
}
client, err := c.http.APIClient()
if err != nil {
c.UI.Error(fmt.Sprintf("Error connect to Consul agent: %s", err))
return 1
}
peerings := client.Peerings()
res, _, err := peerings.Read(context.Background(), c.name, &api.QueryOptions{})
if err != nil {
c.UI.Error("Error reading peerings")
return 1
}
if res == nil {
c.UI.Error(fmt.Sprintf("No peering with name %s found.", c.name))
return 1
}
if c.format == peering.PeeringFormatJSON {
output, err := json.Marshal(res)
if err != nil {
c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err))
return 1
}
c.UI.Output(string(output))
return 0
}
c.UI.Output(formatPeering(res))
return 0
}
func formatPeering(peering *api.Peering) string {
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("Name: %s\n", peering.Name))
buffer.WriteString(fmt.Sprintf("ID: %s\n", peering.ID))
if peering.Partition != "" {
buffer.WriteString(fmt.Sprintf("Partition: %s\n", peering.Partition))
}
if peering.DeletedAt != nil {
buffer.WriteString(fmt.Sprintf("DeletedAt: %s\n", peering.DeletedAt.Format(time.RFC3339)))
}
buffer.WriteString(fmt.Sprintf("State: %s\n", peering.State))
if len(peering.Meta) > 0 {
buffer.WriteString("Meta:\n")
for k, v := range peering.Meta {
buffer.WriteString(fmt.Sprintf(" %s=%s\n", k, v))
}
}
buffer.WriteString("\n")
buffer.WriteString(fmt.Sprintf("Peer ID: %s\n", peering.PeerID))
buffer.WriteString(fmt.Sprintf("Peer Server Name: %s\n", peering.PeerServerName))
buffer.WriteString(fmt.Sprintf("Peer CA Pems: %d\n", len(peering.PeerCAPems)))
if len(peering.PeerServerAddresses) > 0 {
buffer.WriteString("Peer Server Addresses:\n")
for _, v := range peering.PeerServerAddresses {
buffer.WriteString(fmt.Sprintf(" %s", v))
}
}
buffer.WriteString("\n")
buffer.WriteString(fmt.Sprintf("Imported Services: %d\n", peering.ImportedServiceCount))
buffer.WriteString(fmt.Sprintf("Exported Services: %d\n", peering.ExportedServiceCount))
buffer.WriteString("\n")
buffer.WriteString(fmt.Sprintf("Create Index: %d\n", peering.CreateIndex))
buffer.WriteString(fmt.Sprintf("Modify Index: %d\n", peering.ModifyIndex))
return buffer.String()
}
func (c *cmd) Synopsis() string {
return synopsis
}
func (c *cmd) Help() string {
return flags.Usage(c.help, nil)
}
const (
synopsis = "Read a peering connection"
help = `
Usage: consul peering read [options] -name <peer name>
Read a peering connection with the provided name. If one is not found,
the command will exit with a non-zero code. The result will be filtered according
to ACL policy configuration.
Example:
$ consul peering read -name west-dc
`
)

View File

@ -0,0 +1,135 @@
package read
import (
"context"
"encoding/json"
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
)
func TestReadCommand_noTabs(t *testing.T) {
t.Parallel()
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
t.Fatal("help has tabs")
}
}
func TestReadCommand(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
acceptor := agent.NewTestAgent(t, ``)
t.Cleanup(func() { _ = acceptor.Shutdown() })
testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")
acceptingClient := acceptor.Client()
t.Run("no name flag", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
}
code := cmd.Run(args)
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
})
t.Run("invalid format", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-name=foo",
"-format=toml",
}
code := cmd.Run(args)
require.Equal(t, 1, code, "exited successfully when it should have failed")
output := ui.ErrorWriter.String()
require.Contains(t, output, "Invalid format")
})
t.Run("peering does not exist", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-name=foo",
}
code := cmd.Run(args)
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
require.Contains(t, ui.ErrorWriter.String(), "No peering with name")
})
t.Run("read with pretty print", func(t *testing.T) {
generateReq := api.PeeringGenerateTokenRequest{
PeerName: "foo",
Meta: map[string]string{
"env": "production",
},
}
_, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
require.NoError(t, err, "Could not generate peering token at acceptor for \"foo\"")
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-name=foo",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Greater(t, strings.Count(output, "\n"), 0) // Checking for some kind of empty output
// Spot check some fields and values
require.Contains(t, output, "foo")
require.Contains(t, output, api.PeeringStatePending)
require.Contains(t, output, "env=production")
require.Contains(t, output, "Imported Services")
require.Contains(t, output, "Exported Services")
})
t.Run("read with json", func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
args := []string{
"-http-addr=" + acceptor.HTTPAddr(),
"-name=foo",
"-format=json",
}
code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.Bytes()
var outputPeering api.Peering
require.NoError(t, json.Unmarshal(output, &outputPeering))
require.Equal(t, "foo", outputPeering.Name)
require.Equal(t, "production", outputPeering.Meta["env"])
})
}

View File

@ -96,6 +96,12 @@ import (
operraft "github.com/hashicorp/consul/command/operator/raft"
operraftlist "github.com/hashicorp/consul/command/operator/raft/listpeers"
operraftremove "github.com/hashicorp/consul/command/operator/raft/removepeer"
"github.com/hashicorp/consul/command/peering"
peerdelete "github.com/hashicorp/consul/command/peering/delete"
peerestablish "github.com/hashicorp/consul/command/peering/establish"
peergenerate "github.com/hashicorp/consul/command/peering/generate"
peerlist "github.com/hashicorp/consul/command/peering/list"
peerread "github.com/hashicorp/consul/command/peering/read"
"github.com/hashicorp/consul/command/reload"
"github.com/hashicorp/consul/command/rtt"
"github.com/hashicorp/consul/command/services"
@ -214,6 +220,12 @@ func RegisteredCommands(ui cli.Ui) map[string]mcli.CommandFactory {
entry{"operator raft", func(cli.Ui) (cli.Command, error) { return operraft.New(), nil }},
entry{"operator raft list-peers", func(ui cli.Ui) (cli.Command, error) { return operraftlist.New(ui), nil }},
entry{"operator raft remove-peer", func(ui cli.Ui) (cli.Command, error) { return operraftremove.New(ui), nil }},
entry{"peering", func(cli.Ui) (cli.Command, error) { return peering.New(), nil }},
entry{"peering delete", func(ui cli.Ui) (cli.Command, error) { return peerdelete.New(ui), nil }},
entry{"peering generate-token", func(ui cli.Ui) (cli.Command, error) { return peergenerate.New(ui), nil }},
entry{"peering establish", func(ui cli.Ui) (cli.Command, error) { return peerestablish.New(ui), nil }},
entry{"peering list", func(ui cli.Ui) (cli.Command, error) { return peerlist.New(ui), nil }},
entry{"peering read", func(ui cli.Ui) (cli.Command, error) { return peerread.New(ui), nil }},
entry{"reload", func(ui cli.Ui) (cli.Command, error) { return reload.New(ui), nil }},
entry{"rtt", func(ui cli.Ui) (cli.Command, error) { return rtt.New(ui), nil }},
entry{"services", func(cli.Ui) (cli.Command, error) { return services.New(), nil }},

View File

@ -57,7 +57,7 @@ func AssertElementsMatch[V any](
}
}
if len(outX) == len(outY) && len(outX) == len(listX) {
if len(outX) == len(outY) && len(listX) == len(listY) {
return // matches
}

Some files were not shown because too many files have changed in this diff.